-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/uart\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
depends on the selected file.
e.g.
- cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+ cat /sys/bus/platform/drivers/aspeed-uart-routing/\*.uart_routing/uart1
[io1] io2 io3 io4 uart2 uart3 uart4 io6
In this case, UART1 gets its input from IO1 (physical serial port 1).
Users: OpenBMC. Proposed changes should be mailed to
openbmc@lists.ozlabs.org
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/io\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
int rectangle_area(struct shape *this)
{
- struct rectangle *self = container_of(this, struct shape, parent);
+ struct rectangle *self = container_of(this, struct rectangle, parent);
return self->length * self->width;
};
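The fix above matters because container_of() resolves to pointer arithmetic
against the containing type. A minimal, self-contained sketch (plain C with a
local container_of definition and hypothetical types mirroring the snippet,
not kernel code) shows the mechanics::

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified variant of the kernel macro: walk back from a member
	 * pointer to the enclosing object by subtracting the member offset. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct shape {
		int (*area)(struct shape *this);
	};

	struct rectangle {
		struct shape parent;	/* embedded base "class" */
		int length;
		int width;
	};

	static int rectangle_area(struct shape *this)
	{
		/* Must name struct rectangle: offsetof(struct rectangle,
		 * parent) is the distance walked back. Naming struct shape
		 * here would not describe the enclosing object at all. */
		struct rectangle *self = container_of(this, struct rectangle, parent);

		return self->length * self->width;
	}

	int main(void)
	{
		struct rectangle r = {
			.parent = { .area = rectangle_area },
			.length = 3,
			.width = 4,
		};

		printf("%d\n", r.parent.area(&r.parent));	/* prints 12 */
		return 0;
	}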
- OMAP3 BeagleBoard : Low cost community board
compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
contains a specific memory layout, which is documented in chapter 8 of the
SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
+  The thead,c900-plic is different from sifive,plic-1.0.0, as handled in
+  OpenSBI: the T-HEAD PLIC implementation requires setting a delegation bit
+  to allow access from S-mode. The thead,c900-plic compatible is added to
+  distinguish the two.
+
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
properties:
compatible:
- items:
- - enum:
- - sifive,fu540-c000-plic
- - starfive,jh7100-plic
- - canaan,k210-plic
- - const: sifive,plic-1.0.0
+ oneOf:
+ - items:
+ - enum:
+ - sifive,fu540-c000-plic
+ - starfive,jh7100-plic
+ - canaan,k210-plic
+ - const: sifive,plic-1.0.0
+ - items:
+ - enum:
+ - allwinner,sun20i-d1-plic
+ - const: thead,c900-plic
reg:
maxItems: 1
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/davicom,dm9051.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Davicom DM9051 SPI Ethernet Controller
+
+maintainers:
+ - Joseph CHANG <josright123@gmail.com>
+
+description: |
+ The DM9051 is a fully integrated and cost-effective low pin count single
+ chip Fast Ethernet controller with a Serial Peripheral Interface (SPI).
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+properties:
+ compatible:
+ const: davicom,dm9051
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 45000000
+
+ interrupts:
+ maxItems: 1
+
+ local-mac-address: true
+
+ mac-address: true
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ # Raspberry Pi platform
+ - |
+    /* for Raspberry Pi, with pin control for the GPIO IRQ */
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "davicom,dm9051";
+ reg = <0>; /* spi chip select */
+ local-mac-address = [00 00 00 00 00 00];
+ interrupt-parent = <&gpio>;
+ interrupts = <26 IRQ_TYPE_LEVEL_LOW>;
+ spi-max-frequency = <31200000>;
+ };
+ };
+++ /dev/null
-Realtek SMI-based Switches
-==========================
-
-The SMI "Simple Management Interface" is a two-wire protocol using
-bit-banged GPIO that while it reuses the MDIO lines MCK and MDIO does
-not use the MDIO protocol. This binding defines how to specify the
-SMI-based Realtek devices.
-
-Required properties:
-
-- compatible: must be exactly one of:
- "realtek,rtl8365mb" (4+1 ports)
- "realtek,rtl8366"
- "realtek,rtl8366rb" (4+1 ports)
- "realtek,rtl8366s" (4+1 ports)
- "realtek,rtl8367"
- "realtek,rtl8367b"
- "realtek,rtl8368s" (8 port)
- "realtek,rtl8369"
- "realtek,rtl8370" (8 port)
-
-Required properties:
-- mdc-gpios: GPIO line for the MDC clock line.
-- mdio-gpios: GPIO line for the MDIO data line.
-- reset-gpios: GPIO line for the reset signal.
-
-Optional properties:
-- realtek,disable-leds: if the LED drivers are not used in the
- hardware design this will disable them so they are not turned on
- and wasting power.
-
-Required subnodes:
-
-- interrupt-controller
-
- This defines an interrupt controller with an IRQ line (typically
- a GPIO) that will demultiplex and handle the interrupt from the single
- interrupt line coming out of one of the SMI-based chips. It most
- importantly provides link up/down interrupts to the PHY blocks inside
- the ASIC.
-
-Required properties of interrupt-controller:
-
-- interrupt: parent interrupt, see interrupt-controller/interrupts.txt
-- interrupt-controller: see interrupt-controller/interrupts.txt
-- #address-cells: should be <0>
-- #interrupt-cells: should be <1>
-
-- mdio
-
- This defines the internal MDIO bus of the SMI device, mostly for the
- purpose of being able to hook the interrupts to the right PHY and
- the right PHY to the corresponding port.
-
-Required properties of mdio:
-
-- compatible: should be set to "realtek,smi-mdio" for all SMI devices
-
-See net/mdio.txt for additional MDIO bus properties.
-
-See net/dsa/dsa.txt for a list of additional required and optional properties
-and subnodes of DSA switches.
-
-Examples:
-
-An example for the RTL8366RB:
-
-switch {
- compatible = "realtek,rtl8366rb";
- /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
- mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
- mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
-
- switch_intc: interrupt-controller {
- /* GPIO 15 provides the interrupt */
- interrupt-parent = <&gpio0>;
- interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- port@0 {
- reg = <0>;
- label = "lan0";
- phy-handle = <&phy0>;
- };
- port@1 {
- reg = <1>;
- label = "lan1";
- phy-handle = <&phy1>;
- };
- port@2 {
- reg = <2>;
- label = "lan2";
- phy-handle = <&phy2>;
- };
- port@3 {
- reg = <3>;
- label = "lan3";
- phy-handle = <&phy3>;
- };
- port@4 {
- reg = <4>;
- label = "wan";
- phy-handle = <&phy4>;
- };
- port@5 {
- reg = <5>;
- label = "cpu";
- ethernet = <&gmac0>;
- phy-mode = "rgmii";
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
-
- mdio {
- compatible = "realtek,smi-mdio", "dsa-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy0: phy@0 {
- reg = <0>;
- interrupt-parent = <&switch_intc>;
- interrupts = <0>;
- };
- phy1: phy@1 {
- reg = <1>;
- interrupt-parent = <&switch_intc>;
- interrupts = <1>;
- };
- phy2: phy@2 {
- reg = <2>;
- interrupt-parent = <&switch_intc>;
- interrupts = <2>;
- };
- phy3: phy@3 {
- reg = <3>;
- interrupt-parent = <&switch_intc>;
- interrupts = <3>;
- };
- phy4: phy@4 {
- reg = <4>;
- interrupt-parent = <&switch_intc>;
- interrupts = <12>;
- };
- };
-};
-
-An example for the RTL8365MB-VC:
-
-switch {
- compatible = "realtek,rtl8365mb";
- mdc-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
- mdio-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
-
- switch_intc: interrupt-controller {
- interrupt-parent = <&gpio5>;
- interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <1>;
- };
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>;
- port@0 {
- reg = <0>;
- label = "swp0";
-			phy-handle = <&ethphy0>;
- };
- port@1 {
- reg = <1>;
- label = "swp1";
-			phy-handle = <&ethphy1>;
- };
- port@2 {
- reg = <2>;
- label = "swp2";
-			phy-handle = <&ethphy2>;
- };
- port@3 {
- reg = <3>;
- label = "swp3";
-			phy-handle = <&ethphy3>;
- };
- port@6 {
- reg = <6>;
- label = "cpu";
- ethernet = <&fec1>;
- phy-mode = "rgmii";
- tx-internal-delay-ps = <2000>;
- rx-internal-delay-ps = <2000>;
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
- };
-
- mdio {
- compatible = "realtek,smi-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethphy0: phy@0 {
- reg = <0>;
- interrupt-parent = <&switch_intc>;
- interrupts = <0>;
- };
- ethphy1: phy@1 {
- reg = <1>;
- interrupt-parent = <&switch_intc>;
- interrupts = <1>;
- };
- ethphy2: phy@2 {
- reg = <2>;
- interrupt-parent = <&switch_intc>;
- interrupts = <2>;
- };
- ethphy3: phy@3 {
- reg = <3>;
- interrupt-parent = <&switch_intc>;
- interrupts = <3>;
- };
- };
-};
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/realtek.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek unmanaged switches
+
+allOf:
+ - $ref: dsa.yaml#
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+description:
+ Realtek advertises these chips as fast/gigabit switches or unmanaged
+ switches. They can be controlled using different interfaces, like SMI,
+ MDIO or SPI.
+
+  The SMI "Simple Management Interface" is a two-wire protocol using
+  bit-banged GPIO that, while it reuses the MDIO lines MCK and MDIO, does
+  not use the MDIO protocol. This binding defines how to specify the
+  SMI-based Realtek devices. The realtek-smi driver is a platform driver
+  and must be instantiated inside a platform node.
+
+ The MDIO-connected switches use MDIO protocol to access their registers.
+ The realtek-mdio driver is an MDIO driver and it must be inserted inside
+ an MDIO node.
+
+properties:
+ compatible:
+ enum:
+ - realtek,rtl8365mb
+ - realtek,rtl8366
+ - realtek,rtl8366rb
+ - realtek,rtl8366s
+ - realtek,rtl8367
+ - realtek,rtl8367b
+ - realtek,rtl8367rb
+ - realtek,rtl8367s
+ - realtek,rtl8368s
+ - realtek,rtl8369
+ - realtek,rtl8370
+ description: |
+ realtek,rtl8365mb: 4+1 ports
+ realtek,rtl8366: 5+1 ports
+ realtek,rtl8366rb: 5+1 ports
+ realtek,rtl8366s: 5+1 ports
+ realtek,rtl8367:
+ realtek,rtl8367b:
+ realtek,rtl8367rb: 5+2 ports
+ realtek,rtl8367s: 5+2 ports
+ realtek,rtl8368s: 8 ports
+ realtek,rtl8369: 8+1 ports
+ realtek,rtl8370: 8+2 ports
+
+ mdc-gpios:
+ description: GPIO line for the MDC clock line.
+ maxItems: 1
+
+ mdio-gpios:
+ description: GPIO line for the MDIO data line.
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO to be used to reset the whole device
+ maxItems: 1
+
+ realtek,disable-leds:
+ type: boolean
+ description: |
+      If the LED drivers are not used in the hardware design,
+      this will disable them so that they are not turned on
+      and do not waste power.
+
+ interrupt-controller:
+ type: object
+ description: |
+ This defines an interrupt controller with an IRQ line (typically
+ a GPIO) that will demultiplex and handle the interrupt from the single
+ interrupt line coming out of one of the Realtek switch chips. It most
+ importantly provides link up/down interrupts to the PHY blocks inside
+ the ASIC.
+
+ properties:
+
+ interrupt-controller: true
+
+ interrupts:
+ maxItems: 1
+ description:
+ A single IRQ line from the switch, either active LOW or HIGH
+
+ '#address-cells':
+ const: 0
+
+ '#interrupt-cells':
+ const: 1
+
+ required:
+ - interrupt-controller
+ - '#address-cells'
+ - '#interrupt-cells'
+
+ mdio:
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: realtek,smi-mdio
+
+if:
+ required:
+ - reg
+
+then:
+ not:
+ required:
+ - mdc-gpios
+ - mdio-gpios
+ - mdio
+
+ properties:
+ mdc-gpios: false
+ mdio-gpios: false
+ mdio: false
+
+else:
+ required:
+ - mdc-gpios
+ - mdio-gpios
+ - mdio
+ - reset-gpios
+
+required:
+ - compatible
+
+ # - mdc-gpios
+ # - mdio-gpios
+ # - reset-gpios
+ # - mdio
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ platform {
+ switch {
+ compatible = "realtek,rtl8366rb";
+ /* 22 = MDIO (has input reads), 21 = MDC (clock, output only) */
+ mdc-gpios = <&gpio0 21 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio0 22 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+
+ switch_intc1: interrupt-controller {
+ /* GPIO 15 provides the interrupt */
+ interrupt-parent = <&gpio0>;
+ interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ phy-handle = <&phy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ phy-handle = <&phy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ phy-handle = <&phy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ phy-handle = <&phy3>;
+ };
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ phy-handle = <&phy4>;
+ };
+ port@5 {
+ reg = <5>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "rgmii";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc1>;
+ interrupts = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc1>;
+ interrupts = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc1>;
+ interrupts = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc1>;
+ interrupts = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ interrupt-parent = <&switch_intc1>;
+ interrupts = <12>;
+ };
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ platform {
+ switch {
+ compatible = "realtek,rtl8365mb";
+ mdc-gpios = <&gpio1 16 GPIO_ACTIVE_HIGH>;
+ mdio-gpios = <&gpio1 17 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+
+ switch_intc2: interrupt-controller {
+ interrupt-parent = <&gpio5>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ label = "swp0";
+            phy-handle = <&ethphy0>;
+ };
+ port@1 {
+ reg = <1>;
+ label = "swp1";
+            phy-handle = <&ethphy1>;
+ };
+ port@2 {
+ reg = <2>;
+ label = "swp2";
+            phy-handle = <&ethphy2>;
+ };
+ port@3 {
+ reg = <3>;
+ label = "swp3";
+            phy-handle = <&ethphy3>;
+ };
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&fec1>;
+ phy-mode = "rgmii";
+ tx-internal-delay-ps = <2000>;
+ rx-internal-delay-ps = <2000>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+
+ mdio {
+ compatible = "realtek,smi-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ interrupt-parent = <&switch_intc2>;
+ interrupts = <0>;
+ };
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&switch_intc2>;
+ interrupts = <1>;
+ };
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ interrupt-parent = <&switch_intc2>;
+ interrupts = <2>;
+ };
+ ethphy3: ethernet-phy@3 {
+ reg = <3>;
+ interrupt-parent = <&switch_intc2>;
+ interrupts = <3>;
+ };
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@29 {
+ compatible = "realtek,rtl8367s";
+ reg = <29>;
+
+ reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;
+
+ switch_intc3: interrupt-controller {
+ interrupt-parent = <&gpio0>;
+ interrupts = <11 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan4";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan3";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan1";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@7 {
+ reg = <7>;
+          ethernet = <&ethernet>;
+ phy-mode = "rgmii";
+ tx-internal-delay-ps = <2000>;
+ rx-internal-delay-ps = <0>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
calibration data is generic and specific calibration data should be
pulled from the OTP ROM
+ mediatek,disable-radar-background:
+ type: boolean
+ description:
+      Disable radar/CAC detection running on a dedicated off-channel
+      chain available on some hardware.
+      Background radar/CAC detection makes it possible to avoid the CAC
+      downtime of switching to a different channel during CAC detection
+      on the selected radar channel.
+
led:
type: object
$ref: /schemas/leds/common.yaml#
--- /dev/null
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/transmit-amplitude.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common PHY and network PCS transmit amplitude property binding
+
+description:
+ Binding describing the peak-to-peak transmit amplitude for common PHYs
+ and network PCSes.
+
+maintainers:
+ - Marek Behún <kabel@kernel.org>
+
+properties:
+ tx-p2p-microvolt:
+ description:
+ Transmit amplitude voltages in microvolts, peak-to-peak. If this property
+ contains multiple values for various PHY modes, the
+ 'tx-p2p-microvolt-names' property must be provided and contain
+ corresponding mode names.
+
+ tx-p2p-microvolt-names:
+ description: |
+ Names of the modes corresponding to voltages in the 'tx-p2p-microvolt'
+ property. Required only if multiple voltages are provided.
+
+    If a value of 'default' is provided, the system should use it for any PHY
+    mode that is otherwise not defined here. If 'default' is not provided, the
+    system should use the manufacturer's default value.
+ minItems: 1
+ maxItems: 16
+ items:
+ enum:
+ - default
+
+ # ethernet modes
+ - sgmii
+ - qsgmii
+ - xgmii
+ - 1000base-x
+ - 2500base-x
+ - 5gbase-r
+ - rxaui
+ - xaui
+ - 10gbase-kr
+ - usxgmii
+ - 10gbase-r
+ - 25gbase-r
+
+ # PCIe modes
+ - pcie
+ - pcie1
+ - pcie2
+ - pcie3
+ - pcie4
+ - pcie5
+ - pcie6
+
+ # USB modes
+ - usb
+ - usb-ls
+ - usb-fs
+ - usb-hs
+ - usb-ss
+ - usb-ss+
+ - usb-4
+
+ # storage modes
+ - sata
+ - ufs-hs
+ - ufs-hs-a
+ - ufs-hs-b
+
+ # display modes
+ - lvds
+ - dp
+ - dp-rbr
+ - dp-hbr
+ - dp-hbr2
+ - dp-hbr3
+ - dp-uhbr-10
+ - dp-uhbr-13.5
+ - dp-uhbr-20
+
+ # camera modes
+ - mipi-dphy
+ - mipi-dphy-univ
+ - mipi-dphy-v2.5-univ
+
+dependencies:
+ tx-p2p-microvolt-names: [ tx-p2p-microvolt ]
+
+additionalProperties: true
+
+examples:
+ - |
+ phy: phy {
+ #phy-cells = <1>;
+ tx-p2p-microvolt = <915000>, <1100000>, <1200000>;
+ tx-p2p-microvolt-names = "2500base-x", "usb-hs", "usb-ss";
+ };
struct iov_iter *iter,
netfs_io_terminated_t term_func,
void *term_func_priv);
+
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
With a termination handler function pointer::
indicating whether the termination is definitely happening in the caller's
context.
+ * ``query_occupancy()``
+
+ [Required] Called to find out where the next piece of data is within a
+ particular region of the cache. The start and length of the region to be
+ queried are passed in, along with the granularity to which the answer needs
+ to be aligned. The function passes back the start and length of the data,
+ if any, available within that region. Note that there may be a hole at the
+ front.
+
+ It returns 0 if some data was found, -ENODATA if there was no usable data
+ within the region or -ENOBUFS if there is no caching on this file.
+
Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
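By way of illustration only, a toy backend that tracks a single in-memory
extent per file might implement query_occupancy() roughly as follows (the
toy_cache type is hypothetical, and stashing it in cache_priv is an
assumption of this sketch, not something this API mandates)::

	struct toy_cache {
		loff_t	extent_start;	/* first cached byte */
		size_t	extent_len;	/* 0 means nothing is cached */
	};

	static int toy_query_occupancy(struct netfs_cache_resources *cres,
				       loff_t start, size_t len, size_t granularity,
				       loff_t *_data_start, size_t *_data_len)
	{
		struct toy_cache *c = cres->cache_priv;	/* assumed placement */
		loff_t first, end;

		if (!c->extent_len)
			return -ENOBUFS;	/* no caching on this file */

		/* Intersect the queried region with the cached extent. */
		first = max_t(loff_t, start, c->extent_start);
		end = min_t(loff_t, start + (loff_t)len,
			    c->extent_start + (loff_t)c->extent_len);
		if (first >= end)
			return -ENODATA;	/* no usable data in the region */

		/* There may be a hole at the front (first > start is fine); a
		 * real backend would also align the answer to 'granularity'. */
		*_data_start = first;
		*_data_len = end - first;
		return 0;
	}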
Level: Advanced
-Garbage collect fbdev scrolling acceleration
---------------------------------------------
-
-Scroll acceleration has been disabled in fbcon. Now it works as the old
-SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
-removed from fbcon_ops.
-Remaining tasks:
-
-- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
- directly instead of the function table (with a switch on p->rotate)
-
-- fb_copyarea is unused after this, and can be deleted from all drivers
-
-- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
- well as cfb_copyarea
-
-Note that not all acceleration code can be deleted, since clearing and cursor
-support is still accelerated, which might be good candidates for further
-deletion projects.
-
-Contact: Daniel Vetter
-
-Level: Intermediate
-
idr_init_base()
---------------
'B' 00-1F linux/cciss_ioctl.h conflict!
'B' 00-0F include/linux/pmu.h conflict!
'B' C0-FF advanced bbus <mailto:maassen@uni-freiburg.de>
+'B' 00-0F xen/xenbus_dev.h conflict!
'C' all linux/soundcard.h conflict!
'C' 01-2F linux/capi.h conflict!
'C' F0-FF drivers/net/wan/cosa.h conflict!
'F' 80-8F linux/arcfb.h conflict!
'F' DD video/sstfb.h conflict!
'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict!
'H' 00-7F linux/hiddev.h conflict!
'H' 00-0F linux/hidraw.h conflict!
'H' 01 linux/mei.h conflict!
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
+'P' 00-0F xen/privcmd.h conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
M: soc@kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/boot/dts/Makefile
F: arch/arm64/boot/dts/Makefile
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
F: drivers/mailbox/apple-mailbox.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/soc/apple/*
+F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/apple-mailbox.h
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/power/pd-samsung.yaml
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/
ATI_REMOTE2 DRIVER
DMA-BUF HEAPS FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
-R: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
F: drivers/gpu/drm/rockchip/
DRM DRIVERS FOR STI
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR STM
M: Yannick Fertre <yannick.fertre@foss.st.com>
+M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
-M: Nikolay Aleksandrov <nikolay@nvidia.com>
+M: Nikolay Aleksandrov <razor@blackwall.org>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
W: http://floatingpoint.sourceforge.net/emulator/index.html
F: arch/x86/math-emu/
+FRAMEBUFFER CORE
+M: Daniel Vetter <daniel@ffwll.ch>
+F: drivers/video/fbdev/core/
+S: Odd Fixes
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
FRAMEBUFFER LAYER
M: Helge Deller <deller@gmx.de>
L: linux-fbdev@vger.kernel.org
F: drivers/ata/pata_arasan_cf.c
F: include/linux/pata_arasan_cf_data.h
+LIBATA PATA DRIVERS
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+L: linux-ide@vger.kernel.org
+F: drivers/ata/ata_*.c
+F: drivers/ata/pata_*.c
+
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
F: kernel/sched/membarrier.c
MEMBLOCK
-M: Mike Rapoport <rppt@linux.ibm.com>
+M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/core-api/boot-time-mm.rst
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
C: irc://irc.libera.chat/netfilter
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
F: include/linux/netfilter*
F: include/linux/netfilter/
F: include/net/netfilter/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
-M: Anna Schumaker <anna.schumaker@netapp.com>
+M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
W: http://client.linux-nfs.org
M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/ABI/testing/sysfs-firmware-ofw
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/devicetree/
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-R: Jiri Olsa <jolsa@redhat.com>
+R: Jiri Olsa <jolsa@kernel.org>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: ath9k-devel@qca.qualcomm.com
+M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
+M: Loic Poulain <loic.poulain@linaro.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
-T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/
QUANTENNA QTNFMAC WIRELESS DRIVER
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
+RENESAS R-CAR SATA DRIVER
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+S: Supported
+L: linux-ide@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+F: drivers/ata/sata_rcar.c
+
RENESAS R-CAR THERMAL DRIVERS
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
L: linux-renesas-soc@vger.kernel.org
S390
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
-M: Christian Borntraeger <borntraeger@linux.ibm.com>
-R: Alexander Gordeev <agordeev@linux.ibm.com>
+M: Alexander Gordeev <agordeev@linux.ibm.com>
+R: Christian Borntraeger <borntraeger@linux.ibm.com>
R: Sven Schnelle <svens@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-samsung-soc@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
W: http://www.winischhofer.at/linuxsisusbvga.shtml
F: drivers/usb/misc/sisusbvga/
+SL28 CPLD MFD DRIVER
+M: Michael Walle <michael@walle.cc>
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
+F: Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/kontron,sl28cpld-intc.yaml
+F: Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
+F: Documentation/devicetree/bindings/pwm/kontron,sl28cpld-pwm.yaml
+F: Documentation/devicetree/bindings/watchdog/kontron,sl28cpld-wdt.yaml
+F: drivers/gpio/gpio-sl28cpld.c
+F: drivers/hwmon/sl28cpld-hwmon.c
+F: drivers/irqchip/irq-sl28cpld.c
+F: drivers/pwm/pwm-sl28cpld.c
+F: drivers/watchdog/sl28cpld_wdt.c
+
SLAB ALLOCATOR
M: Christoph Lameter <cl@linux.com>
M: Pekka Enberg <penberg@kernel.org>
F: sound/soc/sti/
STI CEC DRIVER
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
S: Maintained
F: Documentation/devicetree/bindings/media/stih-cec.txt
F: drivers/media/cec/platform/sti/
F: Documentation/trace/hwlat_detector.rst
F: arch/*/kernel/trace.c
+Real-time Linux Analysis (RTLA) tools
+M: Daniel Bristot de Oliveira <bristot@kernel.org>
+M: Steven Rostedt <rostedt@goodmis.org>
+L: linux-trace-devel@vger.kernel.org
+S: Maintained
+F: Documentation/tools/rtla/
+F: tools/tracing/rtla/
+
TRADITIONAL CHINESE DOCUMENTATION
M: Hu Haowen <src.res@email.cn>
L: linux-doc-tw-discuss@lists.sourceforge.net
VERSION = 5
PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Gobble Gobble
+EXTRAVERSION = -rc4
+NAME = Superb Owl
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
logicpd-som-lv-37xx-devkit.dtb \
omap3430-sdp.dtb \
omap3-beagle.dtb \
+ omap3-beagle-ab4.dtb \
omap3-beagle-xm.dtb \
omap3-beagle-xm-ab.dtb \
omap3-cm-t3517.dtb \
2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
>;
tx-num-evt = <16>;
- rt-num-evt = <16>;
+ rx-num-evt = <16>;
status = "okay";
};
target-module@48210000 {
compatible = "ti,sysc-omap4-simple", "ti,sysc";
power-domains = <&prm_mpu>;
- clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
+ clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
<0x58000014 4>;
reg-names = "rev", "syss";
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>;
clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
SYSC_OMAP2_SOFTRESET |
SYSC_OMAP2_AUTOIDLE)>;
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck", "dss_clk";
#address-cells = <1>;
#size-cells = <1>;
compatible = "vivante,gc";
reg = <0x0 0x700>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>;
+ clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>;
clock-names = "core";
};
};
ti,no-reset-on-init;
ti,no-idle;
timer@0 {
- assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
+ assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>;
assigned-clock-parents = <&sys_32k_ck>;
};
};
MX23_PAD_LCD_RESET__GPIO_1_18
MX23_PAD_PWM3__GPIO_1_29
MX23_PAD_PWM4__GPIO_1_30
- MX23_PAD_SSP1_DETECT__SSP1_DETECT
>;
fsl,drive-strength = <MXS_DRIVE_4mA>;
fsl,voltage = <MXS_VOLTAGE_HIGH>;
* Author: Fabio Estevam <fabio.estevam@freescale.com>
*/
+#include <dt-bindings/gpio/gpio.h>
+
/ {
aliases {
backlight = &backlight;
MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0
>;
};
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- non-removable;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
- assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
timeout-sec = <40>;
};
};
uart_A: serial@84c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84c0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
fifo-size = <128>;
};
uart_B: serial@84dc {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84dc 0x18>;
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
uart_C: serial@8700 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x8700 0x18>;
interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
uart_AO: serial@4c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart";
reg = <0x4c0 0x18>;
interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
&uart_AO {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
};
&uart_AO {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+#include "omap3-beagle.dts"
+
+/ {
+ model = "TI OMAP3 BeagleBoard A to B4";
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
+};
+
+/*
+ * Workaround for capacitor C70 issue, see "Boards revision A and < B5"
+ * section at https://elinux.org/BeagleBoard_Community
+ */
+
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+ status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
phys = <0 &hsusb2_phy>;
};
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
- status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
- /delete-property/ti,no-reset-on-init;
- /delete-property/ti,no-idle;
- timer@0 {
- /delete-property/ti,timer-alwon;
- };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- /* Always clocked by secure_32k_fck */
- };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- assigned-clocks = <&gpt2_fck>;
- assigned-clock-parents = <&sys_ck>;
- };
-};
-
&twl_gpio {
ti,use-leds;
/* pullups: BIT(1) */
#address-cells = <1>;
#size-cells = <0>;
reg = <0x41>;
- irq-over-gpio;
irq-gpios = <&gpiopinctrl 29 0x4>;
id = <0>;
blocks = <0x5>;
cap-sd-highspeed;
cap-mmc-highspeed;
/* All direction control is used */
- st,sig-dir-cmd;
- st,sig-dir-dat0;
- st,sig-dir-dat2;
- st,sig-dir-dat31;
st,sig-pin-fbclk;
full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ put_device(&pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
- put_device(&pdev->dev);
return r;
}
for_each_matching_node(np, ti_clkctrl_match_table) {
ret = _setup_clkctrl_provider(np);
- if (ret)
+ if (ret) {
+ of_node_put(np);
break;
+ }
}
return ret;
menuconfig ARCH_INTEL_SOCFPGA
bool "Altera SOCFPGA family"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
select PL310_ERRATA_727915
select PL310_ERRATA_753970 if PL310
select PL310_ERRATA_769419
+ select RESET_CONTROLLER
if ARCH_INTEL_SOCFPGA
config SOCFPGA_SUSPEND
config ARM64_ERRATUM_2051678
bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+ default y
help
	  This option adds the workaround for ARM Cortex-A510 erratum 2051678.
	  Affected Cortex-A510 might not respect the ordering rules for
If unsure, say Y.
+config ARM64_ERRATUM_2077057
+ bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2077057.
+	  Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
+ expected, but a Pointer Authentication trap is taken instead. The
+ erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
+ EL1 to cause a return to EL2 with a guest controlled ELR_EL2.
+
+ This can only happen when EL2 is stepping EL1.
+
+ When these conditions occur, the SPSR_EL2 value is unchanged from the
+ previous guest entry, and can be restored from the in-memory copy.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_2119858
bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
default y
help
This enables support for Toshiba Visconti SoCs Family.
-config ARCH_VULCAN
- def_bool n
-
config ARCH_XGENE
bool "AppliedMicro X-Gene SOC Family"
help
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
rtc1 = &vrtc;
};
- dioo2133: audio-amplifier-0 {
+ dio2133: audio-amplifier-0 {
compatible = "simple-audio-amplifier";
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
VCC-supply = <&vcc_5v>;
audio-widgets = "Line", "Lineout";
audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
- <&dioo2133>;
+ <&dio2133>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_5v>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
};
};
+&ftm_alarm0 {
+ status = "okay";
+};
+
&gpio1 {
gpio-line-names =
"", "", "", "", "", "", "", "",
status = "okay";
ports {
- port@1 {
- reg = <1>;
+ port@0 {
+ reg = <0>;
mipi1_sensor_ep: endpoint {
remote-endpoint = <&camera1_ep>;
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
status = "disabled";
- port@0 {
+ port {
lcdif_mipi_dsi: endpoint {
remote-endpoint = <&mipi_dsi_lcdif_in>;
};
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi1_mipi_ep: endpoint {
remote-endpoint = <&csi1_ep>;
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi2_mipi_ep: endpoint {
remote-endpoint = <&csi2_ep>;
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "tqm-tlv320aic32";
+ model = "imx-audio-tlv320aic32x4";
ssi-controller = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
model = "Texas Instruments J721S2 EVM";
chosen {
- stdout-path = "serial10:115200n8";
- bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000";
+ stdout-path = "serial2:115200n8";
+ bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000";
+ };
+
+ aliases {
+ serial1 = &mcu_uart0;
+ serial2 = &main_uart8;
+ mmc0 = &main_sdhci0;
+ mmc1 = &main_sdhci1;
+ can0 = &main_mcan16;
+ can1 = &mcu_mcan0;
+ can2 = &mcu_mcan1;
};
evm_12v0: fixedregulator-evm12v0 {
#address-cells = <2>;
#size-cells = <2>;
- aliases {
- serial0 = &wkup_uart0;
- serial1 = &mcu_uart0;
- serial2 = &main_uart0;
- serial3 = &main_uart1;
- serial4 = &main_uart2;
- serial5 = &main_uart3;
- serial6 = &main_uart4;
- serial7 = &main_uart5;
- serial8 = &main_uart6;
- serial9 = &main_uart7;
- serial10 = &main_uart8;
- serial11 = &main_uart9;
- mmc0 = &main_sdhci0;
- mmc1 = &main_sdhci1;
- can0 = &main_mcan16;
- can1 = &mcu_mcan0;
- can2 = &mcu_mcan1;
- can3 = &main_mcan3;
- can4 = &main_mcan5;
- };
-
chosen { };
cpus {
CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2077057
+ {
+ .desc = "ARM erratum 2077057",
+ .capability = ARM64_WORKAROUND_2077057,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+ },
+#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
{
.desc = "ARM erratum 2064142",
xfer_to_guest_mode_work_pending();
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
* Enter the guest
*/
trace_kvm_entry(*vcpu_pc(vcpu));
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ ret = kvm_arm_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
kvm_arch_vcpu_ctxsync_fp(vcpu);
/*
- * We may have taken a host interrupt in HYP mode (ie
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in SVC mode, with interrupts
- * disabled. Enabling the interrupts now will have
- * the effect of taking the interrupt again, in SVC
- * mode this time.
+ * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
+ * context synchronization event) is necessary to ensure that
+ * pending interrupts are taken.
*/
local_irq_enable();
+ isb();
+ local_irq_disable();
+
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest we
- * account that tick as being spent in the guest. We enable
- * preemption after calling guest_exit() so that if we get
- * preempted we make sure ticks after that is not counted as
- * guest time.
- */
- guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/* Exit types that need handling before we can be preempted */
{
struct kvm_run *run = vcpu->run;
+ if (ARM_SERROR_PENDING(exception_index)) {
+ /*
+ * The SError is handled by handle_exit_early(). If the guest
+ * survives it will re-execute the original instruction.
+ */
+ return 1;
+ }
+
exception_index = ARM_EXCEPTION_CODE(exception_index);
switch (exception_index) {
return false;
}
+static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ /*
+ * Check for the conditions of Cortex-A510's #2077057. When these occur
+ * SPSR_EL2 can't be trusted, but isn't needed either as it is
+ * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+ * Are we single-stepping the guest, and took a PAC exception from the
+ * active-not-pending state?
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
+ vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
+ *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
+ ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
- vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+ synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
- if (ARM_SERROR_PENDING(*exit_code)) {
+ if (ARM_SERROR_PENDING(*exit_code) &&
+ ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else if (vgic_irq_is_mapped_level(irq)) {
+ val = vgic_get_phys_line_level(irq);
} else {
val = irq_is_pending(irq);
}
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
-WORKAROUND_2064142
-WORKAROUND_2038923
WORKAROUND_1902691
+WORKAROUND_2038923
+WORKAROUND_2064142
+WORKAROUND_2077057
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
label = "HDMI OUT";
type = "a";
+ ddc-en-gpios = <&gpa 25 GPIO_ACTIVE_HIGH>;
+
port {
hdmi_con: endpoint {
remote-endpoint = <&dw_hdmi_out>;
gpio = <&gpf 14 GPIO_ACTIVE_LOW>;
enable-active-high;
};
-
- hdmi_power: fixedregulator@3 {
- compatible = "regulator-fixed";
-
- regulator-name = "hdmi_power";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
-
- gpio = <&gpa 25 0>;
- enable-active-high;
- };
};
&ext {
pinctrl-names = "default";
pinctrl-0 = <&pins_hdmi_ddc>;
- hdmi-5v-supply = <&hdmi_power>;
-
ports {
#address-cells = <1>;
#size-cells = <0>;
return -ENOIOCTLCMD;
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_mips_callbacks->vcpu_run(vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
lose_fpu(1);
local_irq_disable();
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(vcpu);
+ r = kvm_mips_vcpu_enter_exit(vcpu);
+
+ /*
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
+ *
+ * TODO: is there a barrier which ensures that pending interrupts are
+ * recognised? Currently this just hopes that the CPU takes any pending
+ * interrupts between the enable and disable.
+ */
+ local_irq_enable();
+ local_irq_disable();
trace_kvm_out(vcpu);
- guest_exit_irqoff();
+ guest_timing_exit_irqoff();
local_irq_enable();
out:
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
return ret;
}
+int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_exit_irqoff();
+ ret = __kvm_mips_handle_exit(vcpu);
+ guest_state_enter_irqoff();
+
+ return ret;
+}
+
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
#include <asm/barrier.h>
#include <linux/atomic.h>
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=r"(__gu_val), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = __gu_tmp.t; \
}
#define __put_user_internal(sr, x, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
- __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
- case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
- case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
- case 8: STD_USER(sr, __x, ptr); break; \
+ case 1: __put_user_asm(sr, "stb", x, ptr); break; \
+ case 2: __put_user_asm(sr, "sth", x, ptr); break; \
+ case 4: __put_user_asm(sr, "stw", x, ptr); break; \
+ case 8: STD_USER(sr, x, ptr); break; \
default: BUILD_BUG(); \
} \
\
#define __put_user(x, ptr) \
({ \
- __put_user_internal("%%sr3,", x, ptr); \
+ __typeof__(&*(ptr)) __ptr = ptr; \
+ __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
+ __put_user_internal("%%sr3,", __x, __ptr); \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
"1: " stx " %2,0(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err))
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(x))
#if !defined(CONFIG_64BIT)
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err)); \
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(__val)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
return *((u64 *)addr);
}
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+
u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
}
}
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
iowrite32(val >> 32, addr + sizeof(u32));
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(ioread64);
EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
EXPORT_SYMBOL(ioread64_hi_lo);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(iowrite64);
EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
EXPORT_SYMBOL(iowrite64_hi_lo);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
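
The _lo_hi and _hi_lo variants differ only in which 32-bit half touches the device first, which matters for hardware that latches a 64-bit register on a particular half. The recombination arithmetic itself is easy to sanity-check in a hedged user-space model:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint32_t low = (uint32_t)val;		/* half written first by iowrite64_lo_hi() */
	uint32_t high = (uint32_t)(val >> 32);	/* half written second */

	/* ioread64_lo_hi() reassembles the halves the same way: */
	assert(low + ((uint64_t)high << 32) == val);
	return 0;
}
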
static bool kernel_set_to_readonly;
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
flush_tlb_all();
}
-void __ref free_initmem(void)
+void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
- * This is tricky, because map_pages is in the init section.
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
+
+# Newer binutils versions default to ISA spec version 20191213 which moves some
+# instructions from the I extension to the Zicsr and Zifencei extensions.
+toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/cpu_ops.h>
+#include <asm/numa.h>
#include <asm/sbi.h>
bool cpu_has_hotplug(unsigned int cpu)
return ret;
remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
irq_migrate_all_off_this_cpu();
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
- li t0, XIP_OFFSET_MASK
- and t1, t1, t0
- li t1, XIP_OFFSET
- sub t0, t0, t1
- sub \reg, \reg, t0
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int level = 0;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(1);
- sp = (unsigned long)__builtin_frame_address(0);
- pc = (unsigned long)__builtin_return_address(0);
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = sp_in_global;
+ pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
break;
/* Validate frame pointer */
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx;
+ struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
+ /* By default, make CY, TM, and IR counters accessible in VU mode */
+ reset_csr->scounteren = 0x7;
+
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
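
The 0x7 written to scounteren above corresponds to the three low counter-enable bits defined by the RISC-V privileged specification; spelled out with hypothetical macro names:

/* Bit positions per the RISC-V privileged spec; the macro names are ours. */
#define SCOUNTEREN_CY	(1UL << 0)	/* cycle counter */
#define SCOUNTEREN_TM	(1UL << 1)	/* time counter */
#define SCOUNTEREN_IR	(1UL << 2)	/* instret counter */

/* 0x7 == SCOUNTEREN_CY | SCOUNTEREN_TM | SCOUNTEREN_IR */
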
csr_write(CSR_HVIP, csr->hvip);
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ guest_state_enter_irqoff();
+ __kvm_riscv_switch_to(&vcpu->arch);
+ guest_state_exit_irqoff();
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int ret;
continue;
}
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- __kvm_riscv_switch_to(&vcpu->arch);
+ kvm_riscv_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
kvm_riscv_vcpu_sync_interrupts(vcpu);
/*
- * We may have taken a host interrupt in VS/VU-mode (i.e.
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in HS-mode with interrupts disabled
- * so enabling the interrupts now will have the effect
- * of taking the interrupt again, in HS-mode this time.
+ * There's no barrier which ensures that pending interrupts are
+ * recognised, so we just hope that the CPU takes any pending
+ * interrupts between the enable and disable.
*/
local_irq_enable();
+ local_irq_disable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest
- * we account that tick as being spent in the guest. We
- * enable preemption after calling guest_exit() so that if
- * we get preempted we make sure ticks after that is not
- * counted as guest time.
- */
- guest_exit();
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
preempt_enable();
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/version.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
*out_val = KVM_SBI_IMPID;
break;
case SBI_EXT_BASE_GET_IMP_VERSION:
- *out_val = 0;
+ *out_val = LINUX_VERSION_CODE;
break;
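
LINUX_VERSION_CODE is the value produced by KERNEL_VERSION(major, minor, patch), i.e. (major << 16) + (minor << 8) + patch, so a guest that reads the SBI implementation version back can unpack it with shifts. A user-space sketch of the decode:

#include <stdio.h>

int main(void)
{
	unsigned long ver = (5UL << 16) + (17 << 8) + 0;	/* e.g. v5.17.0 */

	printf("%lu.%lu.%lu\n", ver >> 16, (ver >> 8) & 0xff, ver & 0xff);
	return 0;
}
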
case SBI_EXT_BASE_PROBE_EXT:
if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
if (unlikely(offset > MAX_REG_OFFSET))
return;
- if (!offset)
+ if (offset)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
- regs_set_gpr(regs, reg_err, -EFAULT);
- regs_set_gpr(regs, reg_zero, 0);
+ regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+ regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->epc = get_ex_fixup(ex);
return true;
#ifdef CONFIG_XIP_KERNEL
#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+#define riscv_pfn_base (*(unsigned long *)XIP_FIXUP(&riscv_pfn_base))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
}
#ifdef CONFIG_XIP_KERNEL
+#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
extern char _xiprom[], _exiprom[], __data_loc;
/* called from head.S with MMU off */
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
+ if (!kvm_s390_pv_cpu_is_protected(vcpu))
+ return -EINVAL;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
#include "test_modules.h"
-#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
-REPEAT_10000(DECLARE_RETURN);
-
/*
* Test that modules with many relocations are loaded properly.
*/
__REPEAT_10000_1(f, 8); \
__REPEAT_10000_1(f, 9)
+#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
+REPEAT_10000(DECLARE_RETURN);
+
#endif
static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
.lbr_read = intel_pmu_lbr_read_64,
.lbr_save = intel_pmu_lbr_save,
.lbr_restore = intel_pmu_lbr_restore,
+
+ /*
+ * SMM has access to all 4 rings and while traditionally SMM code only
+ * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
+ *
+ * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
+ * between SMM or not, this results in what should be pure userspace
+ * counters including SMM data.
+ *
+ * This is a clear privilege issue, therefore globally disable
+ * counting SMM by default.
+ */
+ .attr_freeze_on_smi = 1,
};
static __init void intel_clovertown_quirk(void)
* means we are already losing data; need to let the decoder
* know.
*/
- if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
- buf->output_off == pt_buffer_region_size(buf)) {
+ if (!buf->single &&
+ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == pt_buffer_region_size(buf))) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"\t.word %c1" "\t# bug_entry::line\n" \
"\t.word %c2" "\t# bug_entry::flags\n" \
"\t.org 2b+%c3\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
#else /* !CONFIG_DEBUG_BUGVERBOSE */
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
#else
-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
+#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
#endif /* CONFIG_GENERIC_BUG */
#define BUG() \
do { \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, 0); \
- unreachable(); \
+ _BUG_FLAGS(ASM_UD2, 0, ""); \
+ __builtin_unreachable(); \
} while (0)
/*
*/
#define __WARN_FLAGS(flags) \
do { \
+ __auto_type f = BUGFLAG_WARNING|(flags); \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
- annotate_reachable(); \
+ _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
instrumentation_end(); \
} while (0)
KVM_X86_OP(load_eoi_exitmap)
KVM_X86_OP(set_virtual_apic_mode)
KVM_X86_OP_NULL(set_apic_access_page_addr)
-KVM_X86_OP(deliver_posted_interrupt)
+KVM_X86_OP(deliver_interrupt)
KVM_X86_OP_NULL(sync_pir_to_irr)
KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
- int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+
+/* AVIC */
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
+
+enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+ AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ AVIC_IPI_FAILURE_INVALID_TARGET,
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe. APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
+
+#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
+
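
To make the new mask definitions concrete, here is a hedged sketch (helper names are hypothetical, not kernel API) of decoding a 64-bit physical APIC ID table entry with them:

static inline bool avic_entry_valid(u64 entry)
{
	return entry & AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
}

static inline bool avic_entry_is_running(u64 entry)
{
	return entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
}

/* Physical address of the vAPIC backing page: bits 51:12 of the entry. */
static inline u64 avic_entry_backing_page_pa(u64 entry)
{
	return entry & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK;
}
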
struct vmcb_seg {
u16 selector;
u16 attrib;
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
+/*
+ * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
+ * used to store high bits for the Destination ID. This expands the Destination
+ * ID field from 8 to 15 bits, allowing up to 32768 APIC IDs to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
/*
* Leaf 6 (0x40000x05)
return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}
-#ifdef CONFIG_XEN
-extern bool __init xen_hvm_need_lapic(void);
-
-static inline bool __init xen_x2apic_para_available(void)
-{
- return xen_hvm_need_lapic();
-}
-#else
-static inline bool __init xen_x2apic_para_available(void)
-{
- return (xen_cpuid_base() != 0);
-}
-#endif
-
struct pci_dev;
#ifdef CONFIG_XEN_PV_DOM0
}
kfree(entry);
+ /* Invoke scheduler to prevent soft lockups. */
+ cond_resched();
}
xa_destroy(&encl->page_array);
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dmi.h>
#include <linux/ioport.h>
#include <asm/e820/api.h>
res->start = end + 1;
}
-/*
- * Some BIOS-es contain a bug where they add addresses which map to
- * system RAM in the PCI host bridge window returned by the ACPI _CRS
- * method, see commit 4dc2287c1805 ("x86: avoid E820 regions when
- * allocating address space"). To avoid this Linux by default excludes
- * E820 reservations when allocating addresses since 2010.
- * In 2019 some systems have shown-up with E820 reservations which cover
- * the entire _CRS returned PCI host bridge window, causing all attempts
- * to assign memory to PCI BARs to fail if Linux uses E820 reservations.
- *
- * Ideally Linux would fully stop using E820 reservations, but then
- * the old systems this was added for will regress.
- * Instead keep the old behavior for old systems, while ignoring the
- * E820 reservations for any systems from now on.
- */
static void remove_e820_regions(struct resource *avail)
{
- int i, year = dmi_get_bios_year();
+ int i;
struct e820_entry *entry;
- if (year >= 2018)
- return;
-
- pr_info_once("PCI: Removing E820 reservations from host bridge windows\n");
-
for (i = 0; i < e820_table->nr_entries; i++) {
entry = &e820_table->entries[i];
);
kvm_cpu_cap_mask(CPUID_7_0_EBX,
- F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
- F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
- F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
- );
+ F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
+ F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
+ F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
+ F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
+ F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
+ F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
+ F(AVX512VL));
kvm_cpu_cap_mask(CPUID_7_ECX,
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
apic->regs + APIC_TMR);
}
- if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
- kvm_lapic_set_irr(vector, apic);
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
- } else {
- trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
- trig_mode, vector);
- }
+ static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
+ trig_mode, vector);
break;
case APIC_DM_REMRD:
apic->irr_pending = true;
apic->isr_count = 1;
} else {
- apic->irr_pending = (apic_search_irr(apic) != -1);
+ /*
+ * Don't clear irr_pending, searching the IRR can race with
+ * updates from the CPU as APICv is still active from hardware's
+ * perspective. The flag will be cleared as appropriate when
+ * KVM injects the interrupt.
+ */
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
}
}
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
- unsigned config, bool exclude_user,
+ u64 config, bool exclude_user,
bool exclude_kernel, bool intr,
bool in_tx, bool in_tx_cp)
{
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
- unsigned config, type = PERF_TYPE_RAW;
+ u64 config;
+ u32 type = PERF_TYPE_RAW;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
bool allow_event = true;
}
if (type == PERF_TYPE_RAW)
- config = eventsel & X86_RAW_EVENT_MASK;
+ config = eventsel & AMD64_RAW_EVENT_MASK;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
#include "irq.h"
#include "svm.h"
-#define SVM_AVIC_DOORBELL 0xc001011b
-
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
-
-/*
- * 0xff is broadcast, so the max index allowed for physical APIC ID
- * table is 0xfe. APIC IDs above 0xff are reserved.
- */
-#define AVIC_MAX_PHYSICAL_ID_COUNT 255
-
-#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
-#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
-#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
-
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
void *data; /* Storing pointer to struct amd_ir_data */
};
-enum avic_ipi_failure_cause {
- AVIC_IPI_FAILURE_INVALID_INT_TYPE,
- AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
- AVIC_IPI_FAILURE_INVALID_TARGET,
- AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
-};
/* Note:
* This function is called from IOMMU driver to notify
return 0;
}
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point,
+ * which could result in signalling the wrong/previous pCPU. But if
+ * that happens the vCPU is guaranteed to do a VMRUN (after being
+ * migrated) and thus will process pending interrupts, i.e. a doorbell
+ * is not needed (and the spurious one is harmless).
+ */
+ int cpu = READ_ONCE(vcpu->cpu);
+
+ if (cpu != get_cpu())
+ wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+ put_cpu();
+}
+
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
u32 icrl, u32 icrh)
{
kvm_for_each_vcpu(i, vcpu, kvm) {
if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
GET_APIC_DEST_FIELD(icrh),
- icrl & APIC_DEST_MASK))
- kvm_vcpu_wake_up(vcpu);
+ icrl & APIC_DEST_MASK)) {
+ vcpu->arch.apic->irr_pending = true;
+ svm_complete_interrupt_delivery(vcpu,
+ icrl & APIC_MODE_MASK,
+ icrl & APIC_INT_LEVELTRIG,
+ icrl & APIC_VECTOR_MASK);
+ }
}
}
avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
break;
case AVIC_IPI_FAILURE_INVALID_TARGET:
- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
- index, vcpu->vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
return;
}
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
- if (!vcpu->arch.apicv_active)
- return -1;
-
- kvm_lapic_set_irr(vec, vcpu->arch.apic);
-
- /*
- * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
- * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
- * the read of guest_mode, which guarantees that either VMRUN will see
- * and process the new vIRR entry, or that the below code will signal
- * the doorbell if the vCPU is already running in the guest.
- */
- smp_mb__after_atomic();
-
- /*
- * Signal the doorbell to tell hardware to inject the IRQ if the vCPU
- * is in the guest. If the vCPU is not in the guest, hardware will
- * automatically process AVIC interrupts at VMRUN.
- */
- if (vcpu->mode == IN_GUEST_MODE) {
- int cpu = READ_ONCE(vcpu->cpu);
-
- /*
- * Note, the vCPU could get migrated to a different pCPU at any
- * point, which could result in signalling the wrong/previous
- * pCPU. But if that happens the vCPU is guaranteed to do a
- * VMRUN (after being migrated) and thus will process pending
- * interrupts, i.e. a doorbell is not needed (and the spurious
- * one is harmless).
- */
- if (cpu != get_cpu())
- wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
- put_cpu();
- } else {
- /*
- * Wake the vCPU if it was blocking. KVM will then detect the
- * pending IRQ when checking if the vCPU has a wake event.
- */
- kvm_vcpu_wake_up(vcpu);
- }
-
- return 0;
-}
-
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
!__nested_vmcb_check_save(vcpu, &save_cached))
goto out_free;
- /*
- * While the nested guest CR3 is already checked and set by
- * KVM_SET_SREGS, it was set when nested state was yet loaded,
- * thus MMU might not be initialized correctly.
- * Set it again to fix this.
- */
-
- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm), false);
- if (WARN_ON_ONCE(ret))
- goto out_free;
-
/*
* All checks done, we can enter guest mode. Userspace provides
svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm);
+
+ /*
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set when nested state was yet loaded,
+ * thus MMU might not be initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
+ bool old_paging = is_paging(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
#endif
vcpu->arch.cr0 = cr0;
- if (!npt_enabled)
+ if (!npt_enabled) {
hcr0 |= X86_CR0_PG | X86_CR0_WP;
+ if (old_paging != is_paging(vcpu))
+ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
/*
* re-enable caching here because the QEMU bios
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
- if (!npt_enabled)
+ if (!npt_enabled) {
cr4 |= X86_CR4_PAE;
+
+ if (!is_paging(vcpu))
+ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+ }
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vector)
+{
+ /*
+ * vcpu->arch.apicv_active must be read after vcpu->mode.
+ * Pairs with smp_store_release in vcpu_enter_guest.
+ */
+ bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
+
+ if (!READ_ONCE(vcpu->arch.apicv_active)) {
+ /* Process the interrupt via inject_pending_event */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ return;
+ }
+
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+ if (in_guest_mode) {
+ /*
+ * Signal the doorbell to tell hardware to inject the IRQ. If
+ * the vCPU exits the guest before the doorbell chimes, hardware
+ * will automatically process AVIC interrupts at the next VMRUN.
+ */
+ avic_ring_doorbell(vcpu);
+ } else {
+ /*
+ * Wake the vCPU if it was blocking. KVM will then detect the
+ * pending IRQ when checking if the vCPU has a wake event.
+ */
+ kvm_vcpu_wake_up(vcpu);
+ }
+}
+
+static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ kvm_lapic_set_irr(vector, apic);
+
+ /*
+ * Pairs with the smp_mb_*() after setting vcpu->guest_mode in
+ * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+ * the read of guest_mode. This guarantees that either VMRUN will see
+ * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+ * will signal the doorbell if the CPU has already entered the guest.
+ */
+ smp_mb__after_atomic();
+ svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
+
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_nmi_blocked(vcpu))
+ return 0;
+
/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return -EBUSY;
-
- return !svm_nmi_blocked(vcpu);
+ return 1;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_interrupt_blocked(vcpu))
+ return 0;
+
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
return -EBUSY;
- return !svm_interrupt_blocked(vcpu);
+ return 1;
}
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
if (sev_es_guest(vcpu->kvm)) {
__svm_sev_es_vcpu_run(vmcb_pa);
vmload(__sme_page_pa(sd->save_area));
}
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_smi_blocked(vcpu))
+ return 0;
+
/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
return -EBUSY;
- return !svm_smi_blocked(vcpu);
+ return 1;
}
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
* Enter the nested guest now
*/
+ vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
vmcb12 = map.hva;
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+ if (ret)
+ goto unmap_save;
+
+ svm->nested.nested_run_pending = 1;
+
unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
.pmu_ops = &amd_pmu_ops,
.nested_ops = &svm_nested_ops,
- .deliver_posted_interrupt = svm_deliver_avic_intr,
+ .deliver_interrupt = svm_deliver_interrupt,
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
if (nested) {
kvm_cpu_cap_set(X86_FEATURE_SVM);
+ kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
if (nrips)
kvm_cpu_cap_set(X86_FEATURE_NRIPS);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vec);
/* nested.c */
/* avic.c */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
/* sev.c */
return 0;
}
+static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+
+ if (vmx_deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ } else {
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
+ trig_mode, vector);
+ }
+}
+
/*
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
* will not change in the lifetime of the guest.
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx)
{
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
vcpu->arch.cr2 = native_read_cr2();
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
if (ret)
return ret;
+ vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
return 0;
.hwapic_isr_update = vmx_hwapic_isr_update,
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .deliver_interrupt = vmx_deliver_interrupt,
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
.set_tss_addr = vmx_set_tss_addr,
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
+#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
+
#define emul_to_vcpu(ctxt) \
((struct kvm_vcpu *)(ctxt)->vcpu)
void __user *uaddr = (void __user*)(unsigned long)attr->addr;
if ((u64)(unsigned long)uaddr != attr->addr)
- return ERR_PTR(-EFAULT);
+ return ERR_PTR_USR(-EFAULT);
return uaddr;
}
* result in virtual interrupt delivery.
*/
local_irq_disable();
- vcpu->mode = IN_GUEST_MODE;
+
+ /* Store vcpu->apicv_active before vcpu->mode. */
+ smp_store_release(&vcpu->mode, IN_GUEST_MODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
set_debugreg(0, 7);
}
+ guest_timing_enter_irqoff();
+
for (;;) {
/*
* Assert that vCPU vs. VM APICv state is consistent. An APICv
* of accounting via context tracking, but the loss of accuracy is
* acceptable for all known use cases.
*/
- vtime_account_guest_exit();
+ guest_timing_exit_irqoff();
if (lapic_in_kernel(vcpu)) {
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
kvm_free_pit(kvm);
}
-#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
-
/**
* __x86_set_memory_region: Setup KVM internal memory slot
*
void kvm_spurious_fault(void);
-static __always_inline void kvm_guest_enter_irqoff(void)
-{
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * This ensures that e.g. latency analysis on the host observes
- * guest mode as interrupt enabled.
- *
- * guest_enter_irqoff() informs context tracking about the
- * transition to guest mode and if enabled adjusts RCU state
- * accordingly.
- */
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
-
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-static __always_inline void kvm_guest_exit_irqoff(void)
-{
- /*
- * VMEXIT disables interrupts (host state), but tracing and lockdep
- * have them in state 'on' as recorded before entering guest mode.
- * Same as enter_from_user_mode().
- *
- * context_tracking_guest_exit() restores host context and reinstates
- * RCU if enabled and required.
- *
- * This needs to be done immediately after VM-Exit, before any code
- * that might contain tracepoints or call out to the greater world,
- * e.g. before x86_spec_ctrl_restore_host().
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- context_tracking_guest_exit();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
({ \
bool failed = (consistency_check); \
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
struct kvm_vcpu_xen *vx = &v->arch.xen;
+ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ bool atomic = (state == RUNSTATE_runnable);
uint64_t state_entry_time;
- unsigned int offset;
+ int __user *user_state;
+ uint64_t __user *user_times;
kvm_xen_update_runstate(v, state);
if (!vx->runstate_set)
return;
- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+ return;
+
+ /* We made sure it fits in a single page */
+ BUG_ON(!ghc->memslot);
+
+ if (atomic)
+ pagefault_disable();
- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
-#ifdef CONFIG_X86_64
/*
- * The only difference is alignment of uint64_t in 32-bit.
- * So the first field 'state' is accessed directly using
- * offsetof() (where its offset happens to be zero), while the
- * remaining fields which are all uint64_t, start at 'offset'
- * which we tweak here by adding 4.
+ * The only difference between 32-bit and 64-bit versions of the
+ * runstate struct is the alignment of uint64_t in 32-bit, which
+ * means that the 64-bit version has an additional 4 bytes of
+ * padding after the first field 'state'.
+ *
+ * So we use 'int __user *user_state' to point to the state field,
+ * and 'uint64_t __user *user_times' for runstate_entry_time, so
+ * the actual array of time[] in each state starts at user_times[1].
*/
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+ user_state = (int __user *)ghc->hva;
+
+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct compat_vcpu_runstate_info,
+ state_entry_time));
+#ifdef CONFIG_X86_64
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
offsetof(struct compat_vcpu_runstate_info, time) + 4);
if (v->kvm->arch.xen.long_mode)
- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct vcpu_runstate_info,
+ state_entry_time));
#endif
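
Spelled out, the two layouts that the BUILD_BUG_ON()s pin down are (offsets in bytes):

/*
 * compat (32-bit) layout:             64-bit layout:
 *   0x00  int      state                0x00  int      state
 *   0x04  uint64_t state_entry_time     0x04  (4 bytes of padding)
 *   0x0c  uint64_t time[4]              0x08  uint64_t state_entry_time
 *   0x2c  (total size)                  0x10  uint64_t time[4]
 */
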
/*
* First write the updated state_entry_time at the appropriate
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ if (__put_user(state_entry_time, user_times))
+ goto out;
smp_wmb();
/*
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->current_runstate,
- offsetof(struct vcpu_runstate_info, state),
- sizeof(vx->current_runstate)))
- return;
+ if (__put_user(vx->current_runstate, user_state))
+ goto out;
/*
* Write the actual runstate times immediately after the
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof(vx->runstate_times));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->runstate_times[0],
- offset + sizeof(u64),
- sizeof(vx->runstate_times)))
- return;
-
+ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+ goto out;
smp_wmb();
/*
* Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
* runstate_entry_time field.
*/
-
state_entry_time &= ~XEN_RUNSTATE_UPDATE;
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ __put_user(state_entry_time, user_times);
+ smp_wmb();
+
+ out:
+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+ if (atomic)
+ pagefault_enable();
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
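
This check (repeated below for the time-info and runstate caches) is plain offset arithmetic: the object's offset within its page plus its size must not cross PAGE_SIZE. A user-space model of the same test (constants are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Mirrors the kernel check: true iff [gpa, gpa + len) stays in one page. */
static bool fits_in_one_page(uint64_t gpa, size_t len)
{
	return (gpa & ~PAGE_MASK) + len <= PAGE_SIZE;
}
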
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_info_cache,
data->u.gpa,
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_time_info_cache,
data->u.gpa,
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.runstate_cache,
data->u.gpa,
#include <xen/events.h>
#include <xen/interface/memory.h>
+#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
xen_teardown_timer(cpu);
-
- return 0;
+ return 0;
}
static bool no_vector_callback __initdata;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
-bool __init xen_hvm_need_lapic(void)
+static __init bool xen_x2apic_available(void)
{
- if (xen_pv_domain())
- return false;
- if (!xen_hvm_domain())
- return false;
- if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
- return false;
- return true;
+ return x2apic_supported();
+}
+
+static bool __init msi_ext_dest_id(void)
+{
+ return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
}
static __init void xen_hvm_guest_late_init(void)
.detect = xen_platform_hvm,
.type = X86_HYPER_XEN_HVM,
.init.init_platform = xen_hvm_guest_init,
- .init.x2apic_available = xen_x2apic_para_available,
+ .init.x2apic_available = xen_x2apic_available,
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
.init.guest_late_init = xen_hvm_guest_late_init,
+ .init.msi_ext_dest_id = msi_ext_dest_id,
.runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
};
xen_acpi_sleep_register();
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
xen_boot_params_init_edd();
#ifdef CONFIG_ACPI
return rc;
}
-static void __init xen_fill_possible_map(void)
-{
- int i, rc;
-
- if (xen_initial_domain())
- return;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- }
- }
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
{
int i, rc;
unsigned int subtract = 0;
- if (!xen_initial_domain())
+ if (early)
return;
num_processors = 0;
* sure the old memory can be recycled. */
make_lowmem_page_readwrite(xen_initial_gdt);
- xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
/*
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
- xen_fill_possible_map();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = _get_smp_config;
}
screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.ext_lfb_base)
+ + sizeof(info->u.vesa_lfb.ext_lfb_base)
+ && info->u.vesa_lfb.ext_lfb_base) {
+ screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
+ screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ }
+
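
When VIDEO_CAPABILITY_64BIT_BASE is set, consumers of screen_info reassemble the full framebuffer address from the two 32-bit halves. A hedged, kernel-style sketch of the recombination (the helper name is ours):

/* Rebuild the 64-bit linear-framebuffer base address from screen_info. */
static u64 lfb_base_from_screen_info(const struct screen_info *si)
{
	u64 base = si->lfb_base;

	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		base |= (u64)si->ext_lfb_base << 32;

	return base;
}
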
if (info->video_type == XEN_VGATYPE_EFI_LFB) {
screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
break;
u.vesa_lfb.mode_attrs)
+ sizeof(info->u.vesa_lfb.mode_attrs))
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
-
- if (size >= offsetof(struct dom0_vga_console_info,
- u.vesa_lfb.ext_lfb_base)
- + sizeof(info->u.vesa_lfb.ext_lfb_base)
- && info->u.vesa_lfb.ext_lfb_base) {
- screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
- screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
- }
break;
}
}
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
- bip->bip_iter.bi_sector += bytes_done >> 9;
+ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
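
The bug being fixed is a unit mismatch: bip_iter.bi_sector seeds the protection information, which advances in protection intervals, not in 512-byte sectors. Assuming the usual definition of an interval as 1 << interval_exp bytes, the conversion is a shift; for a 4096-byte interval, 32 sectors of progress are only 4 intervals:

/* Sketch of the sectors-to-intervals conversion: protection intervals
 * covered by 'sectors' 512-byte sectors when one interval spans
 * 1 << interval_exp bytes. interval_exp == 12 gives 32 >> 3 == 4.
 */
static inline unsigned int intervals(unsigned int sectors, unsigned char interval_exp)
{
	return sectors >> (interval_exp - 9);
}
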
{
struct block_device *bdev = iocb->ki_filp->private_data;
loff_t size = bdev_nr_bytes(bdev);
- size_t count = iov_iter_count(to);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
ssize_t ret = 0;
+ size_t count;
- if (unlikely(pos + count > size)) {
+ if (unlikely(pos + iov_iter_count(to) > size)) {
if (pos >= size)
return 0;
size -= pos;
- if (count > size) {
- shorted = count - size;
- iov_iter_truncate(to, size);
- }
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
}
+ count = iov_iter_count(to);
+ if (!count)
+ goto reexpand; /* skip atime */
+
if (iocb->ki_flags & IOCB_DIRECT) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
if (iocb->ki_flags & IOCB_NOWAIT) {
- if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
- iocb->ki_pos + count - 1))
- return -EAGAIN;
+ if (filemap_range_needs_writeback(mapping, pos,
+ pos + count - 1)) {
+ ret = -EAGAIN;
+ goto reexpand;
+ }
} else {
- ret = filemap_write_and_wait_range(mapping,
- iocb->ki_pos,
- iocb->ki_pos + count - 1);
+ ret = filemap_write_and_wait_range(mapping, pos,
+ pos + count - 1);
if (ret < 0)
- return ret;
+ goto reexpand;
}
file_accessed(iocb->ki_filp);
iocb->ki_pos += ret;
count -= ret;
}
+ iov_iter_revert(to, count - iov_iter_count(to));
if (ret < 0 || !count)
- return ret;
+ goto reexpand;
}
ret = filemap_read(iocb, to, ret);
+reexpand:
if (unlikely(shorted))
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
struct list_head list;
};
-static atomic_long_t alg_memory_allocated;
-
static struct proto alg_proto = {
.name = "ALG",
.owner = THIS_MODULE,
- .memory_allocated = &alg_memory_allocated,
.obj_size = sizeof(struct alg_sock),
};
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
+MODULE_SOFTDEP("pre: cryptomgr");
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: cryptomgr");
static int crypto_blake2s_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
+ return crypto_blake2s_update(desc, in, inlen, true);
}
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress_generic);
+ return crypto_blake2s_final(desc, out, true);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
+ { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
depends on ARCH_SUPPORTS_ACPI
select PNP
select NLS
+ select CRC32
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
- res[1].start = pmcg->page1_base_address;
- res[1].end = pmcg->page1_base_address + SZ_4K - 1;
- res[1].flags = IORESOURCE_MEM;
+ /*
+ * The initial version in DEN0049C lacked a way to describe register
+ * page 1, which makes it broken for most PMCG implementations; in
+ * that case, just let the driver fail gracefully if it expects to
+ * find a second memory resource.
+ */
+ if (node->revision > 0) {
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+ }
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
if (acpi_any_gpe_status_set(first_ec->gpe))
return true;
+ /*
+ * Cancel the SCI wakeup and process all pending events in case there
+ * are any wakeup ones in there.
+ *
+ * Note that if any non-EC GPEs are active at this point, the SCI will
+ * retrigger after the rearming in acpi_s2idle_wake(), so no events
+ * should be missed by canceling the wakeup here.
+ */
+ pm_system_cancel_wakeup();
+
/*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
return true;
}
- /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ /*
+ * Check non-EC GPE wakeups and if there are none, cancel the
+ * SCI-related wakeup and dispatch the EC GPE.
+ */
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}
- /*
- * Cancel the SCI wakeup and process all pending events in case
- * there are any wakeup ones in there.
- *
- * Note that if any non-EC GPEs are active at this point, the
- * SCI will retrigger after the rearming below, so no events
- * should be missed by canceling the wakeup here.
- */
- pm_system_cancel_wakeup();
acpi_os_wait_events_complete();
/*
return true;
}
+ pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
- *
- * Only enable on !AMD as enabling this universally causes problems for a number
- * of AMD based systems.
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
*/
- if (!acpi_s2idle_vendor_amd())
- acpi_ec_mark_gpe_for_wake();
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
{
struct ata_port *ap = dev->link->ap;
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ return false;
+
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
return false;
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
- if (!ata_identify_page_supported(dev,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ if (ata_id_major_version(dev->id) < 11 ||
+ !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
goto out;
/*
- * Read IDENTIFY DEVICE data log, page 0x47
- * (concurrent positioning ranges). We can have at most 255 32B range
- * descriptors plus a 64B header.
+ * Read the concurrent positioning ranges log (0x47). We can have at
+ * most 255 32B range descriptors plus a 64B header.
*/
buf_len = (64 + 255 * 32 + 511) & ~511;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
- err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES,
- buf, buf_len >> 9);
+ err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ 0, buf, buf_len >> 9);
if (err_mask)
goto out;
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ /*
+ * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
+ * log page is accessed. Ensure we never ask for this log page with
+ * these devices.
+ */
+ { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+
/* End Marker */
{ }
};
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%d %d\n",
+ return sysfs_emit(buf, "%u %u\n",
intr_coalescing_count, intr_coalescing_ticks);
}
{
unsigned int coalescing_count, coalescing_ticks;
- if (sscanf(buf, "%d%d",
- &coalescing_count,
- &coalescing_ticks) != 2) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
rx_watermark &= 0x1f;
spin_unlock_irqrestore(&host->lock, flags);
- return sysfs_emit(buf, "%d\n", rx_watermark);
+ return sysfs_emit(buf, "%u\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
void __iomem *csr_base = host_priv->csr_base;
u32 temp;
- if (sscanf(buf, "%d", &rx_watermark) != 1) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (kstrtouint(buf, 10, &rx_watermark) < 0) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
temp = ioread32(csr_base + TRANSCFG);
temp &= 0xffffffe0;
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
-
spin_unlock_irqrestore(&host->lock, flags);
+
return strlen(buf);
}
bool events_check_enabled __read_mostly;
/* First and second wakeup IRQs seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
return error;
}
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
/* let user-space know about this change */
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
+ /* This is safe: open() is still holding a reference. */
+ module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
int err;
- mutex_lock(&lo->lo_disk->open_mutex);
+ /*
+ * open_mutex is already held in the release path, so don't
+ * acquire it again when this function is called from there.
+ *
+ * If the partition reread isn't from the release path, lo_refcnt
+ * must be at least one and it can only become zero when the
+ * current holder is released.
+ */
+ if (!release)
+ mutex_lock(&lo->lo_disk->open_mutex);
err = bdev_disk_changed(lo->lo_disk, false);
- mutex_unlock(&lo->lo_disk->open_mutex);
+ if (!release)
+ mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
__func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
}
+ /*
+ * lo->lo_state is set to Lo_unbound here after the above partscan has
+ * finished. There cannot be anybody else entering __loop_clr_fd() as
+ * Lo_rundown state protects us from all the other places trying to
+ * change the 'lo' device.
+ */
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART;
-
- fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
- module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
- struct loop_device *lo = container_of(work, struct loop_device,
- rundown_work);
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
-
- __loop_clr_fd(lo);
- kobject_put(&bdev->bd_device.kobj);
- module_put(disk->fops->owner);
- loop_rundown_completed(lo);
-}
-static void loop_schedule_rundown(struct loop_device *lo)
-{
- struct block_device *bdev = lo->lo_device;
- struct gendisk *disk = lo->lo_disk;
-
- __module_get(disk->fops->owner);
- kobject_get(&bdev->bd_device.kobj);
- INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
- queue_work(system_long_wq, &lo->rundown_work);
+ /*
+ * There is no need to hold lo_mutex to fput the backing file. Calling
+ * fput while holding lo_mutex triggers a possible circular locking
+ * dependency warning, as fput can take open_mutex, which is usually
+ * taken before lo_mutex.
+ */
+ fput(filp);
}
static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
- __loop_clr_fd(lo);
- loop_rundown_completed(lo);
+ __loop_clr_fd(lo, false);
return 0;
}
* In autoclear mode, stop the loop thread
* and remove configuration after last close.
*/
- loop_schedule_rundown(lo);
+ __loop_clr_fd(lo, true);
return;
} else if (lo->lo_state == Lo_bound) {
/*
struct gendisk *lo_disk;
struct mutex lo_mutex;
bool idr_visible;
- struct work_struct rundown_work;
};
struct loop_cmd {
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
.config = &modem_mv31_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
};
static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
return arch_init;
}
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static bool __init crng_init_try_arch_early(void)
{
int i;
bool arch_init = true;
rv = random_get_entropy();
arch_init = false;
}
- crng->state[i] ^= rv;
+ primary_crng.state[i] ^= rv;
}
return arch_init;
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void __init crng_initialize_primary(struct crng_state *crng)
+static void __init crng_initialize_primary(void)
{
- _extract_entropy(&crng->state[4], sizeof(u32) * 12);
- if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
+ _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+ if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void crng_finalize_init(struct crng_state *crng)
+static void crng_finalize_init(void)
{
- if (crng != &primary_crng || crng_init >= 2)
- return;
if (!system_wq) {
/* We can't call numa_crng_init until we have workqueues,
* so mark this for processing later. */
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
+ crng_need_final_init = false;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
memzero_explicit(&buf, sizeof(buf));
WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
+ if (crng == &primary_crng && crng_init < 2)
+ crng_finalize_init();
}
static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
{
init_std_data();
if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
+ crng_finalize_init();
+ crng_initialize_primary();
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
+ if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
return 0;
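
The xchg() does double duty here: it zeroes the counter atomically and hands back the old value, so writers are only woken when there was entropy to drain. The same read-and-zero idiom in a user-space sketch using a GCC builtin:

#include <stdio.h>

int main(void)
{
	int pool = 128;
	/* Atomic read-and-zero, like xchg(&input_pool.entropy_count, 0). */
	int old = __atomic_exchange_n(&pool, 0, __ATOMIC_SEQ_CST);

	if (old)
		printf("drained %d bits; wake any blocked writers\n", old);
	return 0;
}
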
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return;
}
- /* Suspend writing if we're above the trickle threshold.
+ /* Throttle writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
+ * when the calling thread is about to terminate, or once
+ * CRNG_RESEED_INTERVAL has elapsed.
*/
- wait_event_interruptible(random_write_wait,
+ wait_event_interruptible_timeout(random_write_wait,
!system_wq || kthread_should_stop() ||
- POOL_ENTROPY_BITS() <= random_write_wakeup_bits);
+ POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
+ CRNG_RESEED_INTERVAL);
mix_pool_bytes(buffer, count);
credit_entropy_bits(entropy);
}
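Switching from wait_event_interruptible() to the _timeout variant bounds how long a writer can sleep when no explicit wakeup arrives. A sketch of the resulting semantics, with hypothetical pool accessors:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(pool_write_wait);

/* Block while the pool is above the low mark, but re-check at least once
 * per 'interval' jiffies even if nobody issues a wakeup. */
static int throttled_write_wait(unsigned int (*fill_level)(void),
				unsigned int low_mark, unsigned long interval)
{
	long ret = wait_event_interruptible_timeout(pool_write_wait,
						    fill_level() <= low_mark,
						    interval);
	if (ret < 0)
		return ret;	/* -ERESTARTSYS: a signal arrived */
	return 0;		/* condition became true, or timeout elapsed */
}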
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 KiHz oscillator with incomplete dts */
- if (of_machine_is_compatible("ti,omap3-beagle") ||
+ if (of_machine_is_compatible("ti,omap3-beagle-ab4") ||
of_machine_is_compatible("timll,omap3-devkit8000")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
char engs_info[2 * OTX2_CPT_NAME_LENGTH];
struct otx2_cpt_eng_grp_info *grp;
struct otx2_cpt_engs_rsvd *engs;
- u32 mask[4];
int i, j;
pr_debug("Engine groups global info");
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
+ u32 mask[5] = { };
+
get_engs_info(grp, engs_info,
2 * OTX2_CPT_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
return -EINVAL;
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
/* Get the kernel ioctl cmd that matches */
kcmd = dma_heap_ioctl_cmds[nr];
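The added array_index_nospec() call is the standard Spectre-v1 mitigation: the architectural bounds check alone does not stop the CPU from speculating past it, so the index is clamped before it feeds an array load. The general form of the idiom, as a sketch:

#include <linux/nospec.h>
#include <linux/errno.h>

static long table_lookup(unsigned int nr, const long *table, size_t nents)
{
	if (nr >= nents)
		return -EINVAL;
	nr = array_index_nospec(nr, nents);	/* bounded even under speculation */
	return table[nr];
}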
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"No irq %d in DT\n", irq);
- return -ENODEV;
+ return irq;
}
/* Arria10 has a 2nd IRQ */
irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- rc = -EINVAL;
+ rc = irq;
goto out_err;
}
rc = devm_request_irq(&pdev->dev, irq,
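Both hunks converge on the same idiom: propagate the error code encoded by platform_get_irq() instead of overwriting it with a blanket -ENODEV or -EINVAL, so codes such as -EPROBE_DEFER survive up the probe path. A minimal sketch:

#include <linux/platform_device.h>

static int sketch_probe_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* keep the specific reason, e.g. -EPROBE_DEFER */
	/* ... request 'irq' here ... */
	return 0;
}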
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- return gpiod_get_value(fwd->descs[offset]);
+ return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
+ : gpiod_get_value(fwd->descs[offset]);
}
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];
- error = gpiod_get_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
+ else
+ error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- gpiod_set_value(fwd->descs[offset], value);
+ if (chip->can_sleep)
+ gpiod_set_value_cansleep(fwd->descs[offset], value);
+ else
+ gpiod_set_value(fwd->descs[offset], value);
}
static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
descs[j++] = fwd->descs[i];
}
- gpiod_set_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ else
+ gpiod_set_array_value(j, descs, NULL, values);
}
static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
NULL,
chip->base + SIFIVE_GPIO_OUTPUT_EN,
chip->base + SIFIVE_GPIO_INPUT_EN,
- 0);
+ BGPIOF_READ_OUTPUT_REG_SET);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
return container_of(group, struct gpio_sim_bank, group);
}
+static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
+{
+ return bank->label && *bank->label;
+}
+
static struct gpio_sim_device *
gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
{
* point the device doesn't exist yet and so dev_name()
* is not available.
*/
- hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s", dev->id,
- fwnode_get_name(bank->swnode));
+ if (gpio_sim_bank_has_label(bank))
+ hog->chip_label = kstrdup(bank->label,
+ GFP_KERNEL);
+ else
+ hog->chip_label = kasprintf(GFP_KERNEL,
+ "gpio-sim.%u-%s",
+ dev->id,
+ fwnode_get_name(
+ bank->swnode));
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
- if (bank->label && (strlen(bank->label) > 0))
+ if (gpio_sim_bank_has_label(bank))
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
goto out_free_lh;
}
- ret = gpiod_request(desc, lh->label);
+ ret = gpiod_request_user(desc, lh->label);
if (ret)
goto out_free_lh;
lh->descs[i] = desc;
goto out_free_linereq;
}
- ret = gpiod_request(desc, lr->label);
+ ret = gpiod_request_user(desc, lr->label);
if (ret)
goto out_free_linereq;
}
}
- ret = gpiod_request(desc, le->label);
+ ret = gpiod_request_user(desc, le->label);
if (ret)
goto out_free_le;
le->desc = desc;
* they may be undone on its behalf too.
*/
- status = gpiod_request(desc, "sysfs");
- if (status) {
- if (status == -EPROBE_DEFER)
- status = -ENODEV;
+ status = gpiod_request_user(desc, "sysfs");
+ if (status)
goto done;
- }
status = gpiod_set_transitory(desc, false);
if (!status) {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+
+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+{
+ int ret;
+
+ ret = gpiod_request(desc, label);
+ if (ret == -EPROBE_DEFER)
+ ret = -ENODEV;
+
+ return ret;
+}
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
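All three character-device request sites and the sysfs path above now share one boundary rule: -EPROBE_DEFER is an in-kernel "retry later" code and must not leak to user space, which has no sensible way to retry. The helper encodes that mapping once; a caller-side sketch built on it:

#include <linux/errno.h>

/* Sketch of a user-space-facing request path using the helper above:
 * whatever gpiod_request() reports, user space never sees -EPROBE_DEFER. */
static int sketch_user_request(struct gpio_desc *desc, const char *label)
{
	int ret = gpiod_request_user(desc, label);

	if (ret)
		return ret;	/* -ENODEV rather than -EPROBE_DEFER */
	return 0;
}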
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
enum amdgpu_ss ss_state) { return 0; }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
}
}
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * returns true if S3 is expected to be used for the coming system suspend,
+ * false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
* amdgpu_acpi_is_s0ix_active
*
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- if (adev->flags & AMD_IS_APU)
- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
}
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
if (amdgpu_device_supports_boco(drm_dev))
- return pm_runtime_suspended(dev) &&
- pm_suspend_via_firmware();
+ return pm_runtime_suspended(dev);
+
+ /* If neither S3 nor suspend-to-idle (s2idle) is supported for the
+ * device, then skip suspend
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
return 0;
}
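The return values here follow the PM core's ->prepare() contract: a negative value aborts system suspend, zero runs the full suspend sequence, and a positive value lets the core take the direct-complete path and skip this device's remaining suspend/resume callbacks. A hedged sketch of the shape, with a hypothetical predicate:

static int sketch_pmops_prepare(struct device *dev)
{
	if (sketch_device_may_skip_suspend(dev))	/* hypothetical check */
		return 1;	/* direct-complete: skip suspend callbacks */
	return 0;		/* run the normal suspend/resume cycle */
}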
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
disabled_sa = tmp;
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+ break;
+ default:
+ break;
}
}
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
+ return;
+
adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 7.95,
- .sr_enter_plus_exit_time_us = 9,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
}
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 6.09,
- .sr_enter_plus_exit_time_us = 7.14,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
}
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn31_bw_params.wm_table = lpddr5_wm_table;
} else {
- dcn31_bw_params.wm_table = ddr4_wm_table;
+ dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
if (result == VBIOSSMC_Result_Failed) {
- ASSERT(0);
+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ param == TABLE_WATERMARKS)
+ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else
+ ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
return -1;
}
dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
}
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ uint32_t max_otg_num;
};
struct dc_bug_wa {
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
- if (dc_is_embedded_signal(pipe_ctx->stream->signal) &&
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
- pipe_ctx->stream_res.stream_enc);
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
break;
}
}
- // We are trying to enable eDP, don't power down VDD
- if (can_apply_edp_fast_boot)
+
+ /*
+ * TO-DO: So far the code logic below only addresses the single-eDP
+ * case. For the dual-eDP case, a few things need to be implemented
+ * first:
+ *
+ * 1. Change the fastboot logic above, so that eDP link[0 or 1]'s
+ * stream[0 or 1] are all checked.
+ *
+ * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+ * for each eDP.
+ *
+ * Once the above two items are completed, the logic below can be
+ * changed correspondingly, so that the dual-eDP case is fully covered.
+ */
+
+ // We are trying to enable eDP, don't power down VDD if an eDP stream already exists
+ if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
keep_edp_vdd_on = true;
+ DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+ } else {
+ DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+ }
}
// Check seamless boot support
}
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-
- /* set DIG_START to 0x1 to reset FIFO */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
- udelay(100);
-
- /* write 0 to take the FIFO out of reset */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
-}
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
- if (ret_val > clamp_value)
+ if (ret_val > clamp_value) {
+ /* clamping WMs is abnormal, unexpected and may lead to underflow */
+ ASSERT(0);
ret_val = clamp_value;
+ }
return ret_val;
}
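To make the units concrete (numbers assumed for illustration): convert_and_clamp() turns a nanosecond watermark into refclk cycles, so a 9,000 ns watermark at a 24 MHz refclk programs 9000 * 24 / 1000 = 216 cycles. Against the 0x3fff (16,383-cycle) field limit used for the urgent watermarks below, that is comfortably in range; reaching the clamp therefore signals a wildly miscalculated watermark, which is why the clamp now asserts.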
if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
watermarks->a.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
watermarks->b.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
watermarks->c.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
watermarks->d.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
+ uint16_t *mgpu_fan_boost_limit_rpm;
+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
- if (!smc_pptable->MGpuFanBoostLimitRpm)
+ if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
/*
* ui2bc - UI time periods to byte clock cycles
*/
-static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+static u32 ui2bc(unsigned int ui)
{
- u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-
- return DIV64_U64_ROUND_UP(ui * dsi->lanes,
- dsi->mode.clock * 1000 * bpp);
+ return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}
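With cfg->clk_pre now expressed directly in UI (one UI is one high-speed bit period) and the byte clock running at one eighth of the bit clock, the conversion reduces to a round-up division by BITS_PER_BYTE. Worked example with assumed values: 16 UI -> DIV_ROUND_UP(16, 8) = 2 byte-clock cycles, and 12 UI also yields 2, since a partial byte clock must still be counted in full.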
/*
}
/* values in byte clock cycles */
- cycles = ui2bc(dsi, cfg->clk_pre);
+ cycles = ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
- cycles += ui2bc(dsi, cfg->clk_pre);
+ cycles += ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
cycles = ps2bc(dsi, cfg->hs_exit);
*
* The notifier is called with no locks held. The new hw_state and sw_state
* can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
* argument of the notifier_block's notifier_call.
*
* The notifier will NOT be called when changes are made through
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
+ skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}
struct drm_display_mode *fixed_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = connector->encoder;
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
return NULL;
}
+ if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+ encoder->port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS only supported on eDP port A\n");
+ return NULL;
+ }
+
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
+ if (rec->dst_height == 0 || rec->dst_width == 0)
+ return -EINVAL;
+
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
timeout) < 0) {
i915_request_put(rq);
- tl = intel_context_timeline_lock(ce);
+ /*
+ * Error path: we cannot use intel_context_timeline_lock() here,
+ * as it is interruptible by the user and this cleanup step
+ * must always be performed.
+ */
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
+ mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
if (!IS_ERR(fence))
goto out;
- } else if (move_deps) {
- int err = i915_deps_sync(move_deps, ctx);
+ } else {
+ int err = PTR_ERR(fence);
+
+ if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+ return fence;
- if (err)
- return ERR_PTR(err);
+ if (move_deps) {
+ err = i915_deps_sync(move_deps, ctx);
+ if (err)
+ return ERR_PTR(err);
+ }
}
/* Error intercept failed or no accelerated migration to start with */
* context usage for overflows.
*/
struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
if (new_start == lower_32_bits(*prev_start))
return;
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched in contexts will have a start time that is
+ * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted, however if one of them has
+ * rolled over, we need to adjust gt_stamp_hi accordingly.
+ *
+ * The below conditions address the cases of new_start rollover and
+ * gt_stamp_last rollover respectively.
+ */
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
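The two rollover branches can be made concrete with assumed values: if gt_stamp_last = 0xffff0000 and new_start = 0x00000100, then new_start has wrapped past 2^32; the 32-bit subtraction new_start - gt_stamp_last = 0x00010100 is small (well within POLL_TIME_CLKS), so gt_stamp_hi is incremented and the reconstructed 64-bit start time stays monotonic.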
-static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. When the race
+ * occurs, the inconsistency is typically in the upper MSB bytes of the
+ * last_in value: two kinds of corruption are seen - either the upper 8 bits
+ * or the upper 24 bits read back as zero. Since the corrupted reads are
+ * still plausible non-zero values, it is non-trivial to validate them.
+ * Instead we re-read the values until they are consistent. In test runs,
+ * three attempts yield consistent values. The upper bound is set to 6
+ * attempts and may need to be tuned for any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ int i = 0;
+
+ do {
+ *last_in = READ_ONCE(rec->last_switch_in_stamp);
+ *id = READ_ONCE(rec->current_context_index);
+ *total = READ_ONCE(rec->total_runtime);
+
+ if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
+ READ_ONCE(rec->current_context_index) == *id &&
+ READ_ONCE(rec->total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 last_switch = rec->last_switch_in_stamp;
- u32 ctx_id = rec->current_context_index;
- u32 total = rec->total_runtime;
+ u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
}
}
-static void guc_update_pm_timestamp(struct intel_guc *guc,
- struct intel_engine_cs *engine,
- ktime_t *now)
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static u64 gpm_timestamp(struct intel_gt *gt)
+{
+ u32 lo, hi, old_hi, loop = 0;
+
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ do {
+ lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
+ old_hi = hi;
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ } while (old_hi != hi && loop++ < 2);
+
+ return ((u64)hi << 32) | lo;
+}
+
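gpm_timestamp() above is the classic split-register counter read: sample the high word, the low word, then the high word again, and retry if the high word moved, since a carry between the two 32-bit reads would otherwise produce a torn value. The same pattern in generic form (hypothetical accessors; the driver's version additionally bounds the retries at two):

static u64 sketch_read_counter64(u32 (*rd_hi)(void), u32 (*rd_lo)(void))
{
	u32 hi, lo, old_hi;

	hi = rd_hi();
	do {
		lo = rd_lo();	/* low half may carry into the high half */
		old_hi = hi;
		hi = rd_hi();
	} while (hi != old_hi);	/* retry until the high half is stable */

	return ((u64)hi << 32) | lo;
}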
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
- u32 gt_stamp_now, gt_stamp_hi;
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gt_stamp_now = intel_uncore_read(engine->uncore,
- RING_TIMESTAMP(engine->mmio_base));
+ gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
- if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
- guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
guc_update_engine_gt_clks(engine);
- guc_update_pm_timestamp(guc, engine, now);
+ guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
- for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
- }
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
if (!guc_submission_initialized(guc))
return;
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
#ifndef __I915_MM_H__
#define __I915_MM_H__
+#include <linux/bug.h>
#include <linux/types.h>
struct vm_area_struct;
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+#define MISC_STATUS0 _MMIO(0xA500)
+#define MISC_STATUS1 _MMIO(0xA504)
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
};
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
},
.join_mbus = true,
},
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
{
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
}
return 0;
* returns correspondent DBuf slice mask as stated in BSpec for particular
* platform.
*/
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
}
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
}
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
}
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes);
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes);
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice for now.
* Should be extended for other platforms.
return ret;
}
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
continue;
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (IS_ALDERLAKE_P(dev_priv))
- new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
enum pipe pipe = crtc->pipe;
unsigned int mbus_offset;
enum plane_id plane_id;
+ u8 slices;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
- dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
/*
* Used for checking overlaps, so we need absolute
* offsets instead of MBUS relative offsets.
*/
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(dev_priv, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition from the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
-
- if (rpm->available)
- stack_depot_init();
+ stack_depot_init();
}
static noinline depot_stack_handle_t
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
- case LAYER_2:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
- break;
- case LAYER_3:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
- break;
}
kmb->plane_status[plane_id].disable = true;
bridge_state =
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
- bus_format = bridge_state->input_bus_cfg.format;
+ if (!bridge_state)
+ bus_format = MEDIA_BUS_FMT_FIXED;
+ else
+ bus_format = bridge_state->input_bus_cfg.format;
+
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
*addr += bios->imaged_addr;
}
- if (unlikely(*addr + size >= bios->size)) {
+ if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
err = panel_dpi_probe(dev, panel);
if (err)
goto free_ddc;
+ desc = panel->desc;
} else {
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
return ret;
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
- ret);
- return ret;
- }
-
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/*
static const struct vop_win_data rk3399_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3399_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &rk3288_win01_data,
+ { .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &rk3288_win23_data,
+ { .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &rk3288_win23_data,
+ { .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
- mode = &crtc_state->adjusted_mode;
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
mode->clock * 9 / 10) * 1000;
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
connected = true;
} else {
- unsigned long flags;
- u32 hotplug;
-
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- hotplug = HDMI_READ(HDMI_HOTPLUG);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
- if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+ if (vc4_hdmi->variant->hp_detect &&
+ vc4_hdmi->variant->hp_detect(vc4_hdmi))
connected = true;
}
unsigned long long tmds_rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return -EINVAL;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
return channel_map;
}
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+ unsigned long flags;
+ u32 hotplug;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ hotplug = HDMI_READ(HDMI_HOTPLUG);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
/* HDMI audio codec callbacks */
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
* vc4_hdmi_disable_scrambling() will thus run at boot, make
* sure it's disabled, and avoid any inconsistency.
*/
- vc4_hdmi->scdc_enabled = true;
+ if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+ vc4_hdmi->scdc_enabled = true;
ret = variant->init_resources(vc4_hdmi);
if (ret)
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
/* Enables HDR metadata */
bool supports_hdr;
+
+ /* Callback for hardware specific hotplug detect */
+ bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
};
/* HDMI audio information */
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 800 ms timeout */
+ /* Get response with status, within a maximum timeout of 1600 ms */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+ if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+ writel(0, privdata->mmio + AMD_P2C_MSG(4));
+ writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->clear_intr)
+ privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+ amd_sfh_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+ int rc;
+
+ pci_intx(privdata->pdev, true);
+
+ rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+ amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+ if (rc) {
+ dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+ privdata->pdev->irq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
}
static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
.response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
}
}
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->init_intr)
+ return privdata->mp2_ops->init_intr(privdata);
+
+ return 0;
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
mp2_select_ops(privdata);
+ rc = amd_sfh_irq_init(privdata);
+ if (rc) {
+ dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
rc = amd_sfh_hid_client_init(privdata);
- if (rc)
+ if (rc) {
+ amd_sfh_clear_intr(privdata);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
+ }
+
+ amd_sfh_clear_intr(privdata);
return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
}
}
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
}
}
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
} s;
struct {
u32 cmd_id : 4;
- u32 intr_enable : 1;
+ u32 intr_disable : 1;
u32 rsvd1 : 3;
u32 length : 7;
u32 mem_type : 1;
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
};
#endif
#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
get_common_inputs(&als_input.common_property, report_id);
/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
if (supported_input == V2_STATUS)
- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ als_input.illuminance_value =
+ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
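
Since ILLUMINANCE_MASK starts at bit 0, the masking above is equivalent to a FIELD_GET() extraction. A sketch using <linux/bitfield.h>, not the patch's code:

	#include <linux/bitfield.h>

	u32 reg = readl(privdata->mmio + AMD_C2P_MSG(5));
	/* With a mask anchored at bit 0, FIELD_GET() reduces to reg & mask. */
	u32 lux = FIELD_GET(GENMASK(14, 0), reg);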
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
return 0;
err_free:
+ usb_put_dev(udev);
kfree(priv);
return ret;
}
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
struct regulator *vdd;
struct notifier_block nb;
- struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
container_of(nb, struct i2c_hid_of_goodix, nb);
int ret = NOTIFY_OK;
- mutex_lock(&ihid_goodix->regulator_mutex);
-
switch (event) {
case REGULATOR_EVENT_PRE_DISABLE:
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
break;
}
- mutex_unlock(&ihid_goodix->regulator_mutex);
-
return ret;
}
if (!ihid_goodix)
return -ENOMEM;
- mutex_init(&ihid_goodix->regulator_mutex);
-
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
* long. Holding the controller in reset apparently draws extra
* power.
*/
- mutex_lock(&ihid_goodix->regulator_mutex);
ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret) {
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ if (ret)
return dev_err_probe(&client->dev, ret,
"regulator notifier request failed\n");
- }
/*
* If someone else is holding the regulator on (or the regulator is
* an always-on one) we might never be told to deassert reset. Do it
- * now. Here we'll assume that someone else might have _just
- * barely_ turned the regulator on so we'll do the full
- * "post_power_delay" just in case.
+ * now... and temporarily bump the regulator reference count just to
+ * make sure it is impossible for this to race with our own notifier!
+ * We also assume that someone else might have _just barely_ turned
+ * the regulator on so we'll do the full "post_power_delay" just in
+ * case.
*/
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ regulator_disable(ihid_goodix->vdd);
+ }
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
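
LIST_HEAD() declares and initializes the list head in one step; the two forms are equivalent:

	/* Both declare an empty, self-referencing list head: */
	static struct list_head a = LIST_HEAD_INIT(a);
	static LIST_HEAD(b);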
static void hvt_reset(struct hvutil_transport *hvt)
{
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
ret = sysfs_create_group(kobj, &vmbus_chan_group);
* The calling functions' error handling paths will cleanup the
* empty channel directory.
*/
+ kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
return child_device_obj;
}
-static u64 vmbus_dma_mask = DMA_BIT_MASK(64);
/*
* vmbus_device_register - Register the child device
*/
}
hv_debug_add_dev_dir(child_device_obj);
- child_device_obj->device.dma_mask = &vmbus_dma_mask;
child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
}
if (copy_to_user(ival, &fd, sizeof(fd))) {
- put_unused_fd(fd);
- ret = -EFAULT;
- goto error_free_ib;
+ /*
+ * "Leak" the fd, as there's not much we can do about this
+ * anyway. 'fd' might have been closed already, as
+ * anon_inode_getfd() called fd_install() on it, which made
+ * it reachable by userland.
+ *
+ * Instead of allowing a malicious user to play tricks with
+ * us, rely on the process exit path to do any necessary
+ * cleanup, as in releasing the file, if still needed.
+ */
+ return -EFAULT;
}
return 0;
ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
if (ret) {
rdma_destroy_ah_attr(&ah_attr);
- return -EINVAL;
+ goto deref;
}
spin_lock_irq(&cm_id_priv->lock);
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
-static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
- union ib_gid *mgid);
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type);
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
- if (ndev) {
+ if (ndev && !send_only) {
+ enum ib_gid_type gid_type;
union ib_gid mgid;
- cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
- &mgid);
-
- if (!send_only)
- cma_igmp_send(ndev, &mgid, false);
-
- dev_put(ndev);
+ gid_type = id_priv->cma_dev->default_gid_type
+ [id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
+ gid_type);
+ cma_igmp_send(ndev, &mgid, false);
}
+ dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
}
u64 uid;
struct list_head list;
+ struct list_head mc_list;
struct work_struct close_work;
};
u64 uid;
u8 join_state;
+ struct list_head list;
struct sockaddr_storage addr;
};
INIT_WORK(&ctx->close_work, ucma_close_id);
init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
- struct ucma_multicast *mc;
- unsigned long index;
+ struct ucma_multicast *mc, *tmp;
- xa_for_each(&multicast_table, index, mc) {
- if (mc->ctx != ctx)
- continue;
+ xa_lock(&multicast_table);
+ list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
+ list_del(&mc->list);
/*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
- xa_erase(&multicast_table, index);
+ __xa_erase(&multicast_table, mc->id);
kfree(mc);
}
+ xa_unlock(&multicast_table);
}
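
Taking xa_lock() once and using the locked __xa_erase() variant turns the per-entry lock/unlock of xa_erase() into a single critical section. A self-contained sketch of the pattern, with the entry and table names hypothetical:

	struct entry { u32 id; struct list_head list; };
	struct entry *e, *tmp;

	xa_lock(&table);
	list_for_each_entry_safe(e, tmp, &head, list) {
		list_del(&e->list);
		__xa_erase(&table, e->id);	/* caller already holds xa_lock */
		kfree(e);			/* kfree() is safe in atomic context */
	}
	xa_unlock(&table);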
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ xa_lock(&multicast_table);
+ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err_free_mc;
}
+ list_add_tail(&mc->list, &ctx->mc_list);
+ xa_unlock(&multicast_table);
+
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err_xa_erase:
- xa_erase(&multicast_table, mc->id);
+ xa_lock(&multicast_table);
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
err_free_mc:
+ xa_unlock(&multicast_table);
kfree(mc);
err_put_ctx:
ucma_put_ctx(ctx);
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
- else
- __xa_erase(&multicast_table, mc->id);
- xa_unlock(&multicast_table);
if (IS_ERR(mc)) {
+ xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+ xa_unlock(&multicast_table);
+
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
*/
struct ipoib_txreq {
struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr;
int sdma_status;
int complete;
struct hfi1_ipoib_dev_priv *priv;
int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
- return ret;
+ goto out_ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
- return ret;
+ goto out_ret;
}
return 0;
+out_ret:
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
-}
-
-static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
-{
- hfi1_ipoib_netdev_dtor(dev);
- free_netdev(dev);
+ dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
priv->port_num = port_num;
priv->netdev_ops = netdev->netdev_ops;
- netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
-
ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
rc = hfi1_ipoib_txreq_init(priv);
if (rc) {
dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
return rc;
}
rc = hfi1_ipoib_rxq_init(netdev);
if (rc) {
dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
+ hfi1_ipoib_txreq_deinit(priv);
return rc;
}
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
dd_dev_warn(priv->dd,
"%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
__func__, tx->sdma_status,
- le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
tx->txq->sde->this_idx);
}
{
struct hfi1_devdata *dd = txp->dd;
struct sdma_txreq *txreq = &tx->txreq;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
u16 pkt_bytes =
sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
int ret;
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
struct rdma_ah_attr *ah_attr = txp->ah_attr;
if (likely(!ret)) {
tx_ok:
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
return NETDEV_TX_OK;
hfi1_ipoib_check_queue_depth(txq);
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
if (!netdev_xmit_more())
{
struct net_device *dev = priv->netdev;
u32 tx_ring_size, tx_item_size;
- int i;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ int i, j;
/*
* Ring holds 1 less than tx_ring_size
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct ipoib_txreq *tx;
+ tx_ring = &txq->tx_ring;
iowait_init(&txq->wait,
0,
hfi1_ipoib_flush_txq,
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size, tx_item_size,
- GFP_KERNEL, priv->dd->node);
+ kvzalloc_node(array_size(tx_ring_size, tx_item_size),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
txq->tx_ring.max_items = tx_ring_size;
- txq->tx_ring.shift = ilog2(tx_ring_size);
+ txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ kzalloc_node(sizeof(*tx->sdma_hdr),
+ GFP_KERNEL, priv->dd->node);
netif_tx_napi_add(dev, &txq->napi,
hfi1_ipoib_poll_tx_ring,
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(&txq->napi);
- kfree(txq->tx_ring.items);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
- int i;
+ int i, j;
for (i = 0; i < priv->netdev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(&txq->napi);
hfi1_ipoib_drain_tx_ring(txq);
- kfree(txq->tx_ring.items);
+ for (j = 0; j < tx_ring->max_items; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
if (!ew)
- break;
+ return;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
+ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
+ goto inv_err;
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}
-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
-{
- return &qp->orq[qp->orq_put % qp->attrs.orq_size];
-}
-
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
- struct siw_sqe *orq_e = orq_get_tail(qp);
+ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
spin_lock_irqsave(&qp->orq_lock, flags);
- rreq = orq_get_current(qp);
-
/* free current orq entry */
+ rreq = orq_get_current(qp);
WRITE_ONCE(rreq->flags, 0);
+ qp->orq_get++;
+
if (qp->tx_ctx.orq_fence) {
if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
rv = -EPROTO;
goto out;
}
- /* resume SQ processing */
+ /* resume SQ processing, if possible */
if (tx_waiting->sqe.opcode == SIW_OP_READ ||
tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- rreq = orq_get_tail(qp);
+
+ /* SQ processing was stopped because of a full ORQ */
+ rreq = orq_get_free(qp);
if (unlikely(!rreq)) {
pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
rv = -EPROTO;
resume_tx = 1;
} else if (siw_orq_empty(qp)) {
+ /*
+ * SQ processing was stopped by fenced work request.
+ * Resume since all previous Read's are now completed.
+ */
qp->tx_ctx.orq_fence = 0;
resume_tx = 1;
- } else {
- pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
- qp_id(qp), qp->orq_get, qp->orq_put);
- rv = -EPROTO;
}
}
- qp->orq_get++;
out:
spin_unlock_irqrestore(&qp->orq_lock, flags);
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto err_atomic;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
* extensions)
*/
wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
+ if (!wm->touch_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = wm->dev;
wm->touch_dev->dev.platform_data = pdata;
return 0;
touch_reg_err:
platform_device_put(wm->touch_dev);
-touch_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
return ret;
}
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
}
static int _wm97xx_probe(struct wm97xx *wm)
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
+#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
+ udelay(10);
}
if (WARN_ON(i >= LOOP_TIMEOUT))
fn, &intel_ir_domain_ops,
iommu);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
- goto out_free_bitmap;
+ goto out_free_fwnode;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
- goto out_free_bitmap;
+ goto out_free_ir_domain;
}
}
return 0;
+out_free_ir_domain:
+ if (iommu->ir_msi_domain)
+ irq_domain_remove(iommu->ir_msi_domain);
+ iommu->ir_msi_domain = NULL;
+ irq_domain_remove(iommu->ir_domain);
+ iommu->ir_domain = NULL;
+out_free_fwnode:
+ irq_domain_free_fwnode(fn);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
/**
* ioasid_get - obtain a reference to the IOASID
+ * @ioasid: the ID to get
*/
void ioasid_get(ioasid_t ioasid)
{
static void dev_iommu_free(struct device *dev)
{
- iommu_fwspec_free(dev);
- kfree(dev->iommu);
+ struct dev_iommu *param = dev->iommu;
+
dev->iommu = NULL;
+ if (param->fwspec) {
+ fwnode_handle_put(param->fwspec->iommu_fwnode);
+ kfree(param->fwspec);
+ }
+ kfree(param);
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
return ret;
}
-/**
- * iommu_group_for_each_dev - iterate over each device in the group
- * @group: the group
- * @data: caller opaque data to be passed to callback function
- * @fn: caller supplied callback function
- *
- * This function is called by group users to iterate over group devices.
- * Callers should hold a reference count to the group during callback.
- * The group->mutex is held across callbacks, which will block calls to
- * iommu_group_add/remove_device.
- */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
return ret;
}
-
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
}
/**
- * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
* @dev: iommu device
*
* This function performs the necessary checks to determine if the IOMMU
if (!efi_enabled(EFI_CONFIG_TABLES))
return 0;
+ if (list_empty(&its_nodes))
+ return 0;
+
gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"irqchip/arm/gicv3/memreserve:online",
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
- /* Set the NOWAIT flags if all underlying devices support it */
- if (nowait)
- blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
}
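
The two clamps could equivalently be written with min_t(); a sketch under the assumption that offset has already been validated against EE1004_PAGE_SIZE, not the patch's code:

	count = min_t(size_t, count, EE1004_PAGE_SIZE - offset);
	count = min_t(size_t, count, I2C_SMBUS_BLOCK_MAX);	/* SMBus block limit: 32 bytes */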
if (copy_to_user(argp, &bp, sizeof(bp))) {
- dma_buf_put(buf->dmabuf);
+ /*
+ * The usercopy failed, but we can't do much about it, as
+ * dma_buf_fd() already called fd_install() and made the
+ * file descriptor accessible for the current process. It
+ * might already be closed and dmabuf no longer valid when
+ * we reach this point. Therefore "leak" the fd and rely on
+ * the process exit path to do any required cleanup.
+ */
return -EFAULT;
}
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
__res & __mask; \
})
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
goto out;
}
+ /* Find out when the command is completed. */
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
cb_data.card = card;
cb_data.reg_buf = reg_buf;
err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
+ mmc_free_host(mmc);
return 0;
}
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
+ int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
- of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+ }
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
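
dma_set_mask_and_coherent() fails when the platform cannot honor the requested mask, which is why the return value now has to be propagated. The common fallback idiom, sketched:

	/* Try the wide mask first, fall back to 32-bit DMA if unsupported. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;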
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
dev_sw_netstats_rx_add(amt->dev, len);
} else {
skb->pkt_type = PACKET_MULTICAST;
skb->ip_summed = CHECKSUM_NONE;
len = skb->len;
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
true);
dev_sw_netstats_rx_add(amt->dev, len);
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
spin_unlock_bh(&bond->mode_lock);
}
+/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
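Assuming the counter only ever moves downward, the same semantics could be expressed with atomic_dec_if_positive(), which refuses to store a negative result; a sketch, not the patch's code:

	static bool bond_agg_timer_advance_alt(struct bonding *bond)
	{
		/* Returns the new value, or a negative number (without
		 * storing) when the counter is already zero.
		 */
		return atomic_dec_if_positive(&BOND_AD_INFO(bond).agg_select_timer) == 0;
	}
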
/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
config NET_DSA_SMSC_LAN9303
tristate
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
[MV88E6185_PORT_STS_CMODE_PHY] = PHY_INTERFACE_MODE_SGMII,
};
+static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ u8 cmode = chip->ports[port].cmode;
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
+
+ if (mv88e6xxx_phy_is_internal(chip->ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces);
+ } else {
+ if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) &&
+ mv88e6185_phy_interface_modes[cmode])
+ __set_bit(mv88e6185_phy_interface_modes[cmode],
+ config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_1000FD;
+ }
+}
+
static void mv88e6185_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
struct phylink_config *config)
{
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
{
+ struct device_node *phy_handle = NULL;
struct dsa_switch *ds = chip->ds;
+ struct dsa_port *dp;
+ int tx_amp;
int err;
u16 reg;
return err;
}
+ if (chip->info->ops->serdes_set_tx_amplitude) {
+ dp = dsa_to_port(ds, port);
+ if (dp)
+ phy_handle = of_parse_phandle(dp->dn, "phy-handle", 0);
+
+ if (phy_handle && !of_property_read_u32(phy_handle,
+ "tx-p2p-microvolt",
+ &tx_amp))
+ err = chip->info->ops->serdes_set_tx_amplitude(chip,
+ port, tx_amp);
+ if (phy_handle) {
+ of_node_put(phy_handle);
+ if (err)
+ return err;
+ }
+ }
+
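The amplitude is taken from the tx-p2p-microvolt property of the node referenced by the port's phy-handle, and must match one of the table entries below. A hedged devicetree sketch, node names hypothetical:

	port@5 {
		reg = <5>;
		phy-handle = <&serdes_phy>;
	};

	serdes_phy: ethernet-phy@0 {
		reg = <0>;
		tx-p2p-microvolt = <210000>;	/* one of the supported values */
	};
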
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
- return err;
+ goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
+ goto out;
}
if (external)
list_add(&mdio_bus->list, &chip->mdios);
return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
+ mdiobus_free(bus);
}
}
.reset = mv88e6185_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
.rmu_disable = mv88e6085_g1_rmu_disable,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e6095_phylink_get_caps,
.set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_get_caps = mv88e6352_phylink_get_caps,
};
.serdes_irq_status = mv88e6352_serdes_irq_status,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
.serdes_get_stats = mv88e6352_serdes_get_stats,
.serdes_get_regs_len = mv88e6352_serdes_get_regs_len,
.serdes_get_regs = mv88e6352_serdes_get_regs,
+ .serdes_set_tx_amplitude = mv88e6352_serdes_set_tx_amplitude,
.phylink_get_caps = mv88e6352_phylink_get_caps,
};
void (*serdes_get_regs)(struct mv88e6xxx_chip *chip, int port,
void *_p);
+ /* SERDES SGMII/Fiber Output Amplitude */
+ int (*serdes_set_tx_amplitude)(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Address Translation Unit operations */
int (*atu_get_hash)(struct mv88e6xxx_chip *chip, u8 *hash);
int (*atu_set_hash)(struct mv88e6xxx_chip *chip, u8 hash);
}
}
+static const int mv88e6352_serdes_p2p_to_reg[] = {
+	/* The array index of each microvolt value is the register value to program */
+ 14000, 112000, 210000, 308000, 406000, 504000, 602000, 700000,
+};
+
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val)
+{
+ bool found = false;
+ u16 ctrl, reg;
+ int err;
+ int i;
+
+ err = mv88e6352_g2_scratch_port_has_serdes(chip, port);
+ if (err <= 0)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_p2p_to_reg); ++i) {
+ if (mv88e6352_serdes_p2p_to_reg[i] == val) {
+ reg = i;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_SPEC_CTRL2, &ctrl);
+ if (err)
+ return err;
+
+ ctrl &= ~MV88E6352_SERDES_OUT_AMP_MASK;
+ ctrl |= reg;
+
+ return mv88e6352_serdes_write(chip, MV88E6352_SERDES_SPEC_CTRL2, ctrl);
+}
+
static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
bool on)
{
#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4)
#define MV88E6352_SERDES_INT_STATUS 0x13
+#define MV88E6352_SERDES_SPEC_CTRL2 0x1a
+#define MV88E6352_SERDES_OUT_AMP_MASK 0x0007
#define MV88E6341_PORT5_LANE 0x15
int mv88e6390_serdes_get_regs_len(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p);
+int mv88e6352_serdes_set_tx_amplitude(struct mv88e6xxx_chip *chip, int port,
+ int val);
+
/* Return the (first) SERDES lane address a port is using, -errno otherwise. */
static inline int mv88e6xxx_serdes_get_lane(struct mv88e6xxx_chip *chip,
int port)
outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
outer_tagging_rule->prio = 1;
- outer_tagging_rule->id.cookie = port;
+ outer_tagging_rule->id.cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port);
outer_tagging_rule->id.tc_offload = false;
outer_tagging_rule->block_id = VCAP_ES0;
outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
untagging_rule->vlan.vid.value = vid;
untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
untagging_rule->prio = 1;
- untagging_rule->id.cookie = port;
+ untagging_rule->id.cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port);
untagging_rule->id.tc_offload = false;
untagging_rule->block_id = VCAP_IS1;
untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
redirect_rule->ingress_port_mask = BIT(upstream);
redirect_rule->pag = port;
redirect_rule->prio = 1;
- redirect_rule->id.cookie = port;
+ redirect_rule->id.cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port);
redirect_rule->id.tc_offload = false;
redirect_rule->block_id = VCAP_IS2;
redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
mutex_unlock(&ocelot->fwd_domain_lock);
}
-/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
- * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
- * tag_8021q CPU port.
+/* On switches with no extraction IRQ wired, trapped packets need to be
+ * replicated over Ethernet as well, otherwise we'd get no notification of
+ * their arrival when using the ocelot-8021q tagging protocol.
*/
-static int felix_setup_mmio_filtering(struct felix *felix)
+static int felix_update_trapping_destinations(struct dsa_switch *ds,
+ bool using_tag_8021q)
{
- unsigned long user_ports = dsa_user_ports(felix->ds);
- struct ocelot_vcap_filter *redirect_rule;
- struct ocelot_vcap_filter *tagging_rule;
- struct ocelot *ocelot = &felix->ocelot;
- struct dsa_switch *ds = felix->ds;
- int cpu = -1, port, ret;
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct ocelot_vcap_filter *trap;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ struct dsa_port *dp;
+ bool cpu_copy_ena;
+ int cpu = -1, err;
- tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!tagging_rule)
- return -ENOMEM;
+ if (!felix->info->quirk_no_xtr_irq)
+ return 0;
- redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
- if (!redirect_rule) {
- kfree(tagging_rule);
- return -ENOMEM;
+ /* Figure out the current CPU port */
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ cpu = dp->index;
+ break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_cpu_port(ds, port)) {
- cpu = port;
- break;
+ /* We are sure that "cpu" was found, otherwise
+ * dsa_tree_setup_default_cpu() would have failed earlier.
+ */
+
+ /* Make sure all traps are set up for that destination */
+ list_for_each_entry(trap, &ocelot->traps, trap_list) {
+ /* Figure out the current trapping destination */
+ if (using_tag_8021q) {
+ /* Redirect to the tag_8021q CPU port. If timestamps
+ * are necessary, also copy trapped packets to the CPU
+ * port module.
+ */
+ mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ port_mask = BIT(cpu);
+ cpu_copy_ena = !!trap->take_ts;
+ } else {
+ /* Trap packets only to the CPU port module, which is
+ * redirected to the NPI port (the DSA CPU port)
+ */
+ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ port_mask = 0;
+ cpu_copy_ena = true;
}
- }
- if (cpu < 0) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return -EINVAL;
- }
+ if (trap->action.mask_mode == mask_mode &&
+ trap->action.port_mask == port_mask &&
+ trap->action.cpu_copy_ena == cpu_copy_ena)
+ continue;
- tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
- *(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
- *(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
- tagging_rule->ingress_port_mask = user_ports;
- tagging_rule->prio = 1;
- tagging_rule->id.cookie = ocelot->num_phys_ports;
- tagging_rule->id.tc_offload = false;
- tagging_rule->block_id = VCAP_IS1;
- tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- tagging_rule->lookup = 0;
- tagging_rule->action.pag_override_mask = 0xff;
- tagging_rule->action.pag_val = ocelot->num_phys_ports;
-
- ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
- if (ret) {
- kfree(tagging_rule);
- kfree(redirect_rule);
- return ret;
- }
+ trap->action.mask_mode = mask_mode;
+ trap->action.port_mask = port_mask;
+ trap->action.cpu_copy_ena = cpu_copy_ena;
- redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
- redirect_rule->ingress_port_mask = user_ports;
- redirect_rule->pag = ocelot->num_phys_ports;
- redirect_rule->prio = 1;
- redirect_rule->id.cookie = ocelot->num_phys_ports;
- redirect_rule->id.tc_offload = false;
- redirect_rule->block_id = VCAP_IS2;
- redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
- redirect_rule->lookup = 0;
- redirect_rule->action.cpu_copy_ena = true;
- if (felix->info->quirk_no_xtr_irq) {
- /* Redirect to the tag_8021q CPU but also copy PTP packets to
- * the CPU port module
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
- redirect_rule->action.port_mask = BIT(cpu);
- } else {
- /* Trap PTP packets only to the CPU port module (which is
- * redirected to the NPI port)
- */
- redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- redirect_rule->action.port_mask = 0;
- }
-
- ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
- if (ret) {
- ocelot_vcap_filter_del(ocelot, tagging_rule);
- kfree(redirect_rule);
- return ret;
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err)
+ return err;
}
- /* The ownership of the CPU port module's queues might have just been
- * transferred to the tag_8021q tagger from the NPI-based tagger.
- * So there might still be all sorts of crap in the queues. On the
- * other hand, the MMIO-based matching of PTP frames is very brittle,
- * so we need to be careful that there are no extra frames to be
- * dequeued over MMIO, since we would never know to discard them.
- */
- ocelot_drain_cpu_queue(ocelot, 0);
-
return 0;
}
-static int felix_teardown_mmio_filtering(struct felix *felix)
-{
- struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
- struct ocelot_vcap_block *block_vcap_is1;
- struct ocelot_vcap_block *block_vcap_is2;
- struct ocelot *ocelot = &felix->ocelot;
- int err;
-
- block_vcap_is1 = &ocelot->block[VCAP_IS1];
- block_vcap_is2 = &ocelot->block[VCAP_IS2];
-
- tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
- ocelot->num_phys_ports,
- false);
- if (!tagging_rule)
- return -ENOENT;
-
- err = ocelot_vcap_filter_del(ocelot, tagging_rule);
- if (err)
- return err;
-
- redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
- ocelot->num_phys_ports,
- false);
- if (!redirect_rule)
- return -ENOENT;
-
- return ocelot_vcap_filter_del(ocelot, redirect_rule);
-}
-
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
unsigned long cpu_flood;
- int port, err;
+ struct dsa_port *dp;
+ int err;
felix_8021q_cpu_port_init(ocelot, cpu);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* This overwrites ocelot_init():
* Do not forward BPDU frames to the CPU port module,
* for 2 reasons:
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
- ANA_PORT_CPU_FWD_BPDU_CFG, port);
+ ANA_PORT_CPU_FWD_BPDU_CFG, dp->index);
}
/* In tag_8021q mode, the CPU port module is unused, except for PTP
if (err)
return err;
- err = felix_setup_mmio_filtering(felix);
+ err = felix_update_trapping_destinations(ds, true);
if (err)
goto out_tag_8021q_unregister;
+ /* The ownership of the CPU port module's queues might have just been
+ * transferred to the tag_8021q tagger from the NPI-based tagger.
+ * So there might still be all sorts of crap in the queues. On the
+ * other hand, the MMIO-based matching of PTP frames is very brittle,
+ * so we need to be careful that there are no extra frames to be
+ * dequeued over MMIO, since we would never know to discard them.
+ */
+ ocelot_drain_cpu_queue(ocelot, 0);
+
return 0;
out_tag_8021q_unregister:
static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
{
struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
- int err, port;
+ struct dsa_port *dp;
+ int err;
- err = felix_teardown_mmio_filtering(felix);
+ err = felix_update_trapping_destinations(ds, false);
if (err)
dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
err);
dsa_tag_8021q_unregister(ds);
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
+ dsa_switch_for_each_available_port(dp, ds) {
/* Restore the logic from ocelot_init:
* do not forward BPDU frames to the front ports.
*/
ocelot_write_gix(ocelot,
ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
ANA_PORT_CPU_FWD_BPDU_CFG,
- port);
+ dp->index);
}
felix_8021q_cpu_port_deinit(ocelot, cpu);
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port, err;
+ struct dsa_port *dp;
+ int err;
err = felix_init_structs(felix, ds->num_ports);
if (err)
}
}
- for (port = 0; port < ds->num_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_init_port(ocelot, port);
+ dsa_switch_for_each_available_port(dp, ds) {
+ ocelot_init_port(ocelot, dp->index);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
*/
- felix_port_qos_map_init(ocelot, port);
+ felix_port_qos_map_init(ocelot, dp->index);
}
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
-
+ dsa_switch_for_each_cpu_port(dp, ds) {
/* The initial tag protocol is NPI which always returns 0, so
* there's no real point in checking for errors.
*/
- felix_set_tag_protocol(ds, port, felix->tag_proto);
+ felix_set_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
return 0;
out_deinit_ports:
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_deinit_timestamp(ocelot);
ocelot_deinit(ocelot);
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
- int port;
-
- for (port = 0; port < ds->num_ports; port++) {
- if (!dsa_is_cpu_port(ds, port))
- continue;
+ struct dsa_port *dp;
- felix_del_tag_protocol(ds, port, felix->tag_proto);
+ dsa_switch_for_each_cpu_port(dp, ds) {
+ felix_del_tag_protocol(ds, dp->index, felix->tag_proto);
break;
}
- for (port = 0; port < ocelot->num_phys_ports; port++) {
- if (dsa_is_unused_port(ds, port))
- continue;
-
- ocelot_deinit_port(ocelot, port);
- }
+ dsa_switch_for_each_available_port(dp, ds)
+ ocelot_deinit_port(ocelot, dp->index);
ocelot_devlink_sb_unregister(ocelot);
ocelot_deinit_timestamp(ocelot);
struct ifreq *ifr)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_hwstamp_set(ocelot, port, ifr);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_hwstamp_set(ocelot, port, ifr);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
struct flow_cls_offload *cls, bool ingress)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ bool using_tag_8021q;
+ int err;
+
+ err = ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ if (err)
+ return err;
+
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
- return ocelot_cls_flower_replace(ocelot, port, cls, ingress);
+ return felix_update_trapping_destinations(ds, using_tag_8021q);
}
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
}
/* Needed in order to initialize the bus mutex lock */
- rc = of_mdiobus_register(bus, NULL);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
/* TODO: if power is software controlled, set up any regulators here */
priv->leds_disabled = of_property_read_bool(np, "realtek,disable-leds");
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset)) {
+ dev_err(dev, "failed to get RESET GPIO\n");
+ return PTR_ERR(priv->reset);
+ }
+
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
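gpiod_set_value(priv->reset, 1) asserts reset in logical terms; the physical polarity comes from the GPIO flags in the devicetree. A hedged sketch of an active-low reset line, node names hypothetical:

	switch@29 {
		/* GPIOD_OUT_LOW requests the line deasserted; with
		 * GPIO_ACTIVE_LOW, gpiod_set_value(reset, 1) drives the
		 * pin physically low to assert reset.
		 */
		reset-gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
	};
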
+
ret = priv->ops->detect(priv);
if (ret) {
dev_err(dev, "unable to detect switch\n");
dsa_unregister_switch(priv->ds);
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
+
dev_set_drvdata(&mdiodev->dev, NULL);
}
#include "realtek.h"
#define REALTEK_SMI_ACK_RETRY_COUNT 5
-#define REALTEK_SMI_HW_STOP_DELAY 25 /* msecs */
-#define REALTEK_SMI_HW_START_DELAY 100 /* msecs */
static inline void realtek_smi_clk_delay(struct realtek_priv *priv)
{
/* TODO: if power is software controlled, set up any regulators here */
- /* Assert then deassert RESET */
- priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(priv->reset)) {
dev_err(dev, "failed to get RESET GPIO\n");
return PTR_ERR(priv->reset);
}
- msleep(REALTEK_SMI_HW_STOP_DELAY);
- gpiod_set_value(priv->reset, 0);
- msleep(REALTEK_SMI_HW_START_DELAY);
- dev_info(dev, "deasserted RESET\n");
+ if (priv->reset) {
+ gpiod_set_value(priv->reset, 1);
+ dev_dbg(dev, "asserted RESET\n");
+ msleep(REALTEK_HW_STOP_DELAY);
+ gpiod_set_value(priv->reset, 0);
+ msleep(REALTEK_HW_START_DELAY);
+ dev_dbg(dev, "deasserted RESET\n");
+ }
/* Fetch MDIO pins */
priv->mdc = devm_gpiod_get_optional(dev, "mdc", GPIOD_OUT_LOW);
dsa_unregister_switch(priv->ds);
if (priv->slave_mii_bus)
of_node_put(priv->slave_mii_bus->dev.of_node);
- gpiod_set_value(priv->reset, 1);
+
+ /* leave the device reset asserted */
+ if (priv->reset)
+ gpiod_set_value(priv->reset, 1);
platform_set_drvdata(pdev, NULL);
* Copyright (C) 2009-2010 Gabor Juhos <juhosg@openwrt.org>
*/
-#ifndef _REALTEK_SMI_H
-#define _REALTEK_SMI_H
+#ifndef _REALTEK_H
+#define _REALTEK_H
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <net/dsa.h>
+#define REALTEK_HW_STOP_DELAY 25 /* msecs */
+#define REALTEK_HW_START_DELAY 100 /* msecs */
+
struct realtek_ops;
struct dentry;
struct inode;
extern const struct realtek_variant rtl8366rb_variant;
extern const struct realtek_variant rtl8365mb_variant;
-#endif /* _REALTEK_SMI_H */
+#endif /* _REALTEK_H */
{
int delay = 0;
- /* if DMA is busy, wait for current transactino to finish */
+ /* if DMA is busy, wait for current transaction to finish */
while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
& SGDMA_STSREG_BUSY) && (delay++ < 100))
udelay(1);
*/
#define ALTERA_RXDMABUFFER_SIZE 2048
-/* Allow network stack to resume queueing packets after we've
+/* Allow network stack to resume queuing packets after we've
* finished transmitting at least 1/4 of the packets in the queue.
*/
#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
- /* DMA trasfer from TSE starts with 2 aditional bytes for
+ /* DMA transfer from TSE starts with 2 additional bytes for
* IP payload alignment. Status returned by get_rx_status()
* contains DMA transfer length. Packet is 2 bytes shorter.
*/
csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
spin_unlock(&priv->mac_cfg_lock);
}
-/* Set or clear the multicast filter for this adaptor
+/* Set or clear the multicast filter for this adapter
*/
static void tse_set_rx_mode(struct net_device *dev)
{
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
* compute the delta in terms of coprocessor clocks.
*/
delta = (u64)ppb << 32;
- div64_u64(delta, oct->coproc_clock_rate);
+ do_div(delta, oct->coproc_clock_rate);
spin_lock_irqsave(&lio->ptp_lock, flags);
comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
u64 clock_comp, cfg;
clock_comp = (u64)NSEC_PER_SEC << 32;
- div64_u64(clock_comp, oct->coproc_clock_rate);
+ do_div(clock_comp, oct->coproc_clock_rate);
lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
/* Enable */
# Davicom device configuration
#
+config NET_VENDOR_DAVICOM
+ bool "Davicom devices"
+ default y
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Davicom devices. If you say Y, you will be asked
+ for your specific card in the following selections.
+
+if NET_VENDOR_DAVICOM
+
config DM9000
tristate "DM9000 support"
depends on ARM || MIPS || COLDFIRE || NIOS2 || COMPILE_TEST
bit to determine if the link is up or down instead of the more
costly MII PHY reads. Note, this will not work if the chip is
operating with an external PHY.
+
+config DM9051
+ tristate "DM9051 SPI support"
+ depends on SPI
+ select CRC32
+ select MDIO
+ select PHYLIB
+ select REGMAP_SPI
+ help
+ Support for DM9051 SPI chipset.
+
+ To compile this driver as a module, choose M here. The module
+ will be called dm9051.
+
+	  The host's SPI master must use SPI mode 0 on the bus to
+	  access the DM9051.
+
+endif # NET_VENDOR_DAVICOM
#
obj-$(CONFIG_DM9000) += dm9000.o
+obj-$(CONFIG_DM9051) += dm9051.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "dm9051.h"
+
+#define DRVNAME_9051 "dm9051"
+
+/**
+ * struct rx_ctl_mach - rx activities record
+ * @status_err_counter: rx status error counter
+ * @large_err_counter: rx get large packet length error counter
+ * @rx_err_counter: receive packet error counter
+ * @tx_err_counter: transmit packet error counter
+ * @fifo_rst_counter: reset operation counter
+ *
+ * Keeps track of driver operation statistics.
+ */
+struct rx_ctl_mach {
+ u16 status_err_counter;
+ u16 large_err_counter;
+ u16 rx_err_counter;
+ u16 tx_err_counter;
+ u16 fifo_rst_counter;
+};
+
+/**
+ * struct dm9051_rxctrl - dm9051 driver rx control
+ * @hash_table: Multicast hash-table data
+ * @rcr_all: KS_RXCR1 register setting
+ *
+ * The settings needed to control the receive filtering, such as
+ * the multicast hash filter and the receive control register
+ */
+struct dm9051_rxctrl {
+ u16 hash_table[4];
+ u8 rcr_all;
+};
+
+/**
+ * struct dm9051_rxhdr - rx packet data header
+ * @headbyte: lead byte equal to 0x01 notifies a valid packet
+ * @status: status bits for the received packet
+ * @rxlen: packet length
+ *
+ * Each Rx packet in the FIFO memory starts with these four
+ * header bytes, followed by the ethernet packet data and an
+ * appended 4-byte CRC.
+ * The header and CRC are only used for checking and are finally
+ * dropped by this driver
+ */
+struct dm9051_rxhdr {
+ u8 headbyte;
+ u8 status;
+ __le16 rxlen;
+};
+
+/**
+ * struct board_info - maintain the saved data
+ * @spidev: spi device structure
+ * @ndev: net device structure
+ * @mdiobus: mii bus structure
+ * @phydev: phy device structure
+ * @txq: tx queue structure
+ * @regmap_dm: regmap for register read/write
+ * @regmap_dmbulk: extra regmap for bulk read/write
+ * @rxctrl_work: Work queue for updating RX mode and multicast lists
+ * @tx_work: Work queue for tx packets
+ * @pause: ethtool pause parameter structure
+ * @spi_lockm: between threads lock structure
+ * @reg_mutex: regmap access lock structure
+ * @bc: rx control statistics structure
+ * @rxhdr: rx header structure
+ * @rctl: rx control setting structure
+ * @msg_enable: message level value
+ * @imr_all: to store operating imr value for register DM9051_IMR
+ * @lcr_all: to store operating lcr value for register DM9051_LMCR
+ *
+ * Saved driver state, kept up to date for later retrieval and use
+ */
+struct board_info {
+ u32 msg_enable;
+ struct spi_device *spidev;
+ struct net_device *ndev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+ struct sk_buff_head txq;
+ struct regmap *regmap_dm;
+ struct regmap *regmap_dmbulk;
+ struct work_struct rxctrl_work;
+ struct work_struct tx_work;
+ struct ethtool_pauseparam pause;
+ struct mutex spi_lockm;
+ struct mutex reg_mutex;
+ struct rx_ctl_mach bc;
+ struct dm9051_rxhdr rxhdr;
+ struct dm9051_rxctrl rctl;
+ u8 imr_all;
+ u8 lcr_all;
+};
+
+static int dm9051_set_reg(struct board_info *db, unsigned int reg, unsigned int val)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, reg, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d set reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_update_bits(struct board_info *db, unsigned int reg, unsigned int mask,
+ unsigned int val)
+{
+ int ret;
+
+ ret = regmap_update_bits(db->regmap_dm, reg, mask, val);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d update bits reg %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* skb buffer exhausted, just discard the received data
+ */
+static int dm9051_dumpblk(struct board_info *db, u8 reg, size_t count)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rb;
+ int ret;
+
+	/* no skb buffer available: discard the data by reading it one
+	 * byte at a time via regmap_read, so that neither the register
+	 * address nor the destination pointer increments
+	 */
+ do {
+ ret = regmap_read(db->regmap_dm, reg, &rb);
+ if (ret < 0) {
+ netif_err(db, drv, ndev, "%s: error %d dumping read reg %02x\n",
+ __func__, ret, reg);
+ break;
+ }
+ } while (--count);
+
+ return ret;
+}
+
+static int dm9051_set_regs(struct board_info *db, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_get_regs(struct board_info *db, unsigned int reg, void *val,
+ size_t val_count)
+{
+ int ret;
+
+ ret = regmap_bulk_read(db->regmap_dmbulk, reg, val, val_count);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d bulk reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_write_mem(struct board_info *db, unsigned int reg, const void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_write(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc writing regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+static int dm9051_read_mem(struct board_info *db, unsigned int reg, void *buff,
+ size_t len)
+{
+ int ret;
+
+ ret = regmap_noinc_read(db->regmap_dm, reg, buff, len);
+ if (ret < 0)
+ netif_err(db, drv, db->ndev, "%s: error %d noinc reading regs %02x\n",
+ __func__, ret, reg);
+ return ret;
+}
+
+/* polling for tx-end rather than tx-req
+ * turns out to be faster
+ */
+static int dm9051_nsr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_NSR, mval,
+ mval & (NSR_TX2END | NSR_TX1END), 1, 20);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "timeout in checking for tx end\n");
+ return ret;
+}
+
+static int dm9051_epcr_poll(struct board_info *db)
+{
+ unsigned int mval;
+ int ret;
+
+ ret = regmap_read_poll_timeout(db->regmap_dm, DM9051_EPCR, mval,
+ !(mval & EPCR_ERRE), 100, 10000);
+ if (ret == -ETIMEDOUT)
+ netdev_err(db->ndev, "eeprom/phy in processing get timeout\n");
+ return ret;
+}
+
+static int dm9051_irq_flag(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int irq_type = irq_get_trigger_type(spi->irq);
+
+ if (irq_type)
+ return irq_type;
+
+ return IRQF_TRIGGER_LOW;
+}
+
+static unsigned int dm9051_intcr_value(struct board_info *db)
+{
+ return (dm9051_irq_flag(db) == IRQF_TRIGGER_LOW) ?
+ INTCR_POL_LOW : INTCR_POL_HIGH;
+}
+
+static int dm9051_set_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_set_reg(db, DM9051_FCR, fcr);
+}
+
+static int dm9051_set_recv(struct board_info *db)
+{
+ int ret;
+
+ ret = dm9051_set_regs(db, DM9051_MAR, db->rctl.hash_table, sizeof(db->rctl.hash_table));
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, db->rctl.rcr_all); /* enable rx */
+}
+
+static int dm9051_core_reset(struct board_info *db)
+{
+ int ret;
+
+ db->bc.fifo_rst_counter++;
+
+ ret = regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST); /* NCR reset */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_MBNDRY, MBNDRY_BYTE); /* MemBound */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_PPCR, PPCR_PAUSE_COUNT); /* Pause Count */
+ if (ret)
+ return ret;
+ ret = regmap_write(db->regmap_dm, DM9051_LMCR, db->lcr_all); /* LEDMode1 */
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_INTCR, dm9051_intcr_value(db));
+}
+
+static int dm9051_update_fcr(struct board_info *db)
+{
+ u8 fcr = 0;
+
+ if (db->pause.rx_pause)
+ fcr |= FCR_BKPM | FCR_FLCE;
+ if (db->pause.tx_pause)
+ fcr |= FCR_TXPEN;
+
+ return dm9051_update_bits(db, DM9051_FCR, FCR_RXTX_BITS, fcr);
+}
+
+static int dm9051_disable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, IMR_PAR); /* disable int */
+}
+
+static int dm9051_enable_interrupt(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_IMR, db->imr_all); /* enable int */
+}
+
+static int dm9051_stop_mrcmd(struct board_info *db)
+{
+ return dm9051_set_reg(db, DM9051_ISR, ISR_STOP_MRCMD); /* to stop mrcmd */
+}
+
+static int dm9051_clear_interrupt(struct board_info *db)
+{
+ return dm9051_update_bits(db, DM9051_ISR, ISR_CLR_INT, ISR_CLR_INT);
+}
+
+static int dm9051_eeprom_read(struct board_info *db, int offset, u8 *to)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, to, 2);
+}
+
+static int dm9051_eeprom_write(struct board_info *db, int offset, u8 *data)
+{
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, data, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_WEP | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
+static int dm9051_phyread(void *context, unsigned int reg, unsigned int *val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_ERPRR | EPCR_EPOS);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+ if (ret)
+ return ret;
+
+	/* val is 4 bytes wide; clear it to zero, since the following
+	 * regmap_bulk_read only fills the lower 2 bytes
+ */
+ *val = 0;
+ return regmap_bulk_read(db->regmap_dmbulk, DM9051_EPDRL, val, 2);
+}
+
+static int dm9051_phywrite(void *context, unsigned int reg, unsigned int val)
+{
+ struct board_info *db = context;
+ int ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPAR, DM9051_PHY | reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_write(db->regmap_dmbulk, DM9051_EPDRL, &val, 2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(db->regmap_dm, DM9051_EPCR, EPCR_EPOS | EPCR_ERPRW);
+ if (ret)
+ return ret;
+
+ ret = dm9051_epcr_poll(db);
+ if (ret)
+ return ret;
+
+ return regmap_write(db->regmap_dm, DM9051_EPCR, 0);
+}
+
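A hedged usage sketch of the EPCR handshake above (illustrative only; in the driver these accesses normally arrive via the MDIO bus callbacks below). MII_BMSR and BMSR_LSTATUS come from linux/mii.h; the local variables are assumptions for the example:

	unsigned int bmsr;
	bool link_up;
	int ret;

	/* EPAR selects the PHY register, EPCR_EPOS | EPCR_ERPRR starts
	 * the cycle, dm9051_epcr_poll() waits for EPCR_ERRE to clear,
	 * and the 16-bit result comes back via EPDRL/EPDRH.
	 */
	ret = dm9051_phyread(db, MII_BMSR, &bmsr);
	if (ret)
		return ret;
	link_up = !!(bmsr & BMSR_LSTATUS);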
+static int dm9051_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct board_info *db = bus->priv;
+ unsigned int val = 0xffff;
+ int ret;
+
+ if (addr == DM9051_PHY_ADDR) {
+ ret = dm9051_phyread(db, regnum, &val);
+ if (ret)
+ return ret;
+ }
+
+ return val;
+}
+
+static int dm9051_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ struct board_info *db = bus->priv;
+
+ if (addr == DM9051_PHY_ADDR)
+ return dm9051_phywrite(db, regnum, val);
+
+ return -ENODEV;
+}
+
+static void dm9051_reg_lock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_lock(&db->reg_mutex);
+}
+
+static void dm9051_reg_unlock_mutex(void *dbcontext)
+{
+ struct board_info *db = dbcontext;
+
+ mutex_unlock(&db->reg_mutex);
+}
+
+static struct regmap_config regconfigdm = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+};
+
+static struct regmap_config regconfigdmbulk = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+ .reg_stride = 1,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = 0,
+ .write_flag_mask = DM_SPI_WR,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .lock = dm9051_reg_lock_mutex,
+ .unlock = dm9051_reg_unlock_mutex,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
+static int dm9051_map_init(struct spi_device *spi, struct board_info *db)
+{
+	/* create two regmap instances, splitting read/write and
+	 * bulk_read/bulk_write into separate regmaps to avoid
+	 * conflicting regmap accesses
+ */
+ regconfigdm.lock_arg = db;
+ db->regmap_dm = devm_regmap_init_spi(db->spidev, ®configdm);
+ if (IS_ERR(db->regmap_dm))
+ return PTR_ERR(db->regmap_dm);
+
+ regconfigdmbulk.lock_arg = db;
+ db->regmap_dmbulk = devm_regmap_init_spi(db->spidev, ®configdmbulk);
+ if (IS_ERR(db->regmap_dmbulk))
+ return PTR_ERR(db->regmap_dmbulk);
+
+ return 0;
+}
+
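Why two maps (a design note inferred from the configs above, not stated in the patch): regconfigdmbulk sets use_single_read/use_single_write, so the bulk helpers still issue one transfer per byte-wide control register, while the noinc helpers on regmap_dm stream many bytes from one fixed FIFO address. Roughly, with hypothetical buffers:

	/* control registers: one transfer per byte register */
	dm9051_get_regs(db, DM9051_PAR, addr, ETH_ALEN);
	/* FIFO window: many bytes from one non-incrementing address */
	dm9051_read_mem(db, DM_SPI_MRCMD, buf, len);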
+static int dm9051_map_chipid(struct board_info *db)
+{
+ struct device *dev = &db->spidev->dev;
+ unsigned short wid;
+ u8 buff[6];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_VIDL, buff, sizeof(buff));
+ if (ret < 0)
+ return ret;
+
+ wid = get_unaligned_le16(buff + 2);
+ if (wid != DM9051_ID) {
+ dev_err(dev, "chipid error as %04x !\n", wid);
+ return -ENODEV;
+ }
+
+ dev_info(dev, "chip %04x found\n", wid);
+ return 0;
+}
+
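To make the ID check concrete (register layout taken from the header added below; the vendor-ID detail is an assumption from other Davicom parts and is not checked here):

	/* the bulk read starts at DM9051_VIDL (0x28):
	 *   buff[0] VIDL  buff[1] VIDH   (vendor ID, unchecked)
	 *   buff[2] PIDL  buff[3] PIDH   (product ID)
	 * so a healthy chip yields get_unaligned_le16(buff + 2) == 0x9051,
	 * which is DM9051_ID.
	 */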
+/* Read the DM9051_PAR registers, which hold the MAC address loaded from EEPROM at power-on
+ */
+static int dm9051_map_etherdev_par(struct net_device *ndev, struct board_info *db)
+{
+ u8 addr[ETH_ALEN];
+ int ret;
+
+ ret = dm9051_get_regs(db, DM9051_PAR, addr, sizeof(addr));
+ if (ret < 0)
+ return ret;
+
+ if (!is_valid_ether_addr(addr)) {
+ eth_hw_addr_random(ndev);
+
+ ret = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&db->spidev->dev, "Use random MAC address\n");
+ return 0;
+ }
+
+ eth_hw_addr_set(ndev, addr);
+ return 0;
+}
+
+/* ethtool-ops
+ */
+static void dm9051_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRVNAME_9051, sizeof(info->driver));
+}
+
+static void dm9051_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->msg_enable = value;
+}
+
+static u32 dm9051_get_msglevel(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ return db->msg_enable;
+}
+
+static int dm9051_get_eeprom_len(struct net_device *dev)
+{
+ return 128;
+}
+
+static int dm9051_get_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ ee->magic = DM_EEPROM_MAGIC;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_read(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int dm9051_set_eeprom(struct net_device *ndev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int offset = ee->offset;
+ int len = ee->len;
+ int i, ret;
+
+ if ((len | offset) & 1)
+ return -EINVAL;
+
+ if (ee->magic != DM_EEPROM_MAGIC)
+ return -EINVAL;
+
+ for (i = 0; i < len; i += 2) {
+ ret = dm9051_eeprom_write(db, (offset + i) / 2, data + i);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
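The helpers above address the EEPROM in 16-bit words, which is why both offset and length must be even and the word address is (offset + i) / 2. A hypothetical read of bytes 4..7 (names assumed for the example):

	u8 buf[4];
	int i, ret;

	for (i = 0; i < 4; i += 2) {
		/* bytes 4..5 -> word 2, bytes 6..7 -> word 3 */
		ret = dm9051_eeprom_read(db, (4 + i) / 2, buf + i);
		if (ret)
			break;
	}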
+static void dm9051_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ *pause = db->pause;
+}
+
+static int dm9051_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ db->pause = *pause;
+
+ if (pause->autoneg == AUTONEG_DISABLE)
+ return dm9051_update_fcr(db);
+
+ phy_set_sym_pause(db->phydev, pause->rx_pause, pause->tx_pause,
+ pause->autoneg);
+ phy_start_aneg(db->phydev);
+ return 0;
+}
+
+static const struct ethtool_ops dm9051_ethtool_ops = {
+ .get_drvinfo = dm9051_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_msglevel = dm9051_get_msglevel,
+ .set_msglevel = dm9051_set_msglevel,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = dm9051_get_eeprom_len,
+ .get_eeprom = dm9051_get_eeprom,
+ .set_eeprom = dm9051_set_eeprom,
+ .get_pauseparam = dm9051_get_pauseparam,
+ .set_pauseparam = dm9051_set_pauseparam,
+};
+
+static int dm9051_all_start(struct board_info *db)
+{
+ int ret;
+
+ /* GPR power on of the internal phy
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, 0);
+ if (ret)
+ return ret;
+
+	/* dm9051 chip registers cannot be accessed within 1 ms of
+	 * GPR power-on, so a 1 ms delay is essential
+ */
+ msleep(1);
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ return dm9051_enable_interrupt(db);
+}
+
+static int dm9051_all_stop(struct board_info *db)
+{
+ int ret;
+
+	/* GPR power off of the internal phy; the phy registers can
+	 * still be accessed after this power-off control
+ */
+ ret = dm9051_set_reg(db, DM9051_GPR, GPR_PHY_OFF);
+ if (ret)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_RCR, RCR_RX_DISABLE);
+}
+
+/* fifo reset when an rx error is found
+ */
+static int dm9051_all_restart(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ret;
+
+ ret = dm9051_core_reset(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_enable_interrupt(db);
+ if (ret)
+ return ret;
+
+ netdev_dbg(ndev, " rxstatus_Er & rxlen_Er %d, RST_c %d\n",
+ db->bc.status_err_counter + db->bc.large_err_counter,
+ db->bc.fifo_rst_counter);
+
+ ret = dm9051_set_recv(db);
+ if (ret)
+ return ret;
+
+ return dm9051_set_fcr(db);
+}
+
+/* read packets from the fifo memory
+ * return value,
+ * > 0 - read packet number, caller can repeat the rx operation
+ * 0 - no error, caller need stop further rx operation
+ * -EBUSY - read data error, caller escape from rx operation
+ */
+static int dm9051_loop_rx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ unsigned int rxbyte;
+ int ret, rxlen;
+ struct sk_buff *skb;
+ u8 *rdptr;
+ int scanrr = 0;
+
+ do {
+ ret = dm9051_read_mem(db, DM_SPI_MRCMDX, &rxbyte, 2);
+ if (ret)
+ return ret;
+
+ if ((rxbyte & GENMASK(7, 0)) != DM9051_PKT_RDY)
+ break; /* exhaust-empty */
+
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, &db->rxhdr, DM_RXHDR_SIZE);
+ if (ret)
+ return ret;
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ rxlen = le16_to_cpu(db->rxhdr.rxlen);
+ if (db->rxhdr.status & RSR_ERR_BITS || rxlen > DM9051_PKT_MAX) {
+ netdev_dbg(ndev, "rxhdr-byte (%02x)\n",
+ db->rxhdr.headbyte);
+
+ if (db->rxhdr.status & RSR_ERR_BITS) {
+ db->bc.status_err_counter++;
+ netdev_dbg(ndev, "check rxstatus-error (%02x)\n",
+ db->rxhdr.status);
+ } else {
+ db->bc.large_err_counter++;
+ netdev_dbg(ndev, "check rxlen large-error (%d > %d)\n",
+ rxlen, DM9051_PKT_MAX);
+ }
+ return dm9051_all_restart(db);
+ }
+
+ skb = dev_alloc_skb(rxlen);
+ if (!skb) {
+ ret = dm9051_dumpblk(db, DM_SPI_MRCMD, rxlen);
+ if (ret)
+ return ret;
+ return scanrr;
+ }
+
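+		/* reserve the payload only; the 4-byte CRC is still
+		 * clocked out of the FIFO below but lands in the skb
+		 * tailroom past skb_put()
+		 */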
+ rdptr = skb_put(skb, rxlen - 4);
+ ret = dm9051_read_mem(db, DM_SPI_MRCMD, rdptr, rxlen);
+ if (ret) {
+ db->bc.rx_err_counter++;
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ret = dm9051_stop_mrcmd(db);
+ if (ret)
+ return ret;
+
+ skb->protocol = eth_type_trans(skb, db->ndev);
+ if (db->ndev->features & NETIF_F_RXCSUM)
+ skb_checksum_none_assert(skb);
+ netif_rx_ni(skb);
+ db->ndev->stats.rx_bytes += rxlen;
+ db->ndev->stats.rx_packets++;
+ scanrr++;
+ } while (!ret);
+
+ return scanrr;
+}
+
+/* transmit a packet,
+ * return value,
+ * 0 - succeed
+ * -ETIMEDOUT - timeout error
+ */
+static int dm9051_single_tx(struct board_info *db, u8 *buff, unsigned int len)
+{
+ int ret;
+
+ ret = dm9051_nsr_poll(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_write_mem(db, DM_SPI_MWCMD, buff, len);
+ if (ret)
+ return ret;
+
+ ret = dm9051_set_regs(db, DM9051_TXPLL, &len, 2);
+ if (ret < 0)
+ return ret;
+
+ return dm9051_set_reg(db, DM9051_TCR, TCR_TXREQ);
+}
+
+static int dm9051_loop_tx(struct board_info *db)
+{
+ struct net_device *ndev = db->ndev;
+ int ntx = 0;
+ int ret;
+
+ while (!skb_queue_empty(&db->txq)) {
+ struct sk_buff *skb;
+
+		skb = skb_dequeue(&db->txq);
+		if (skb) {
+			/* cache the length: the skb is freed before the
+			 * stats are updated
+			 */
+			unsigned int len = skb->len;
+
+			ntx++;
+			ret = dm9051_single_tx(db, skb->data, len);
+			dev_kfree_skb(skb);
+			if (ret < 0) {
+				db->bc.tx_err_counter++;
+				return 0;
+			}
+			ndev->stats.tx_bytes += len;
+			ndev->stats.tx_packets++;
+ }
+
+ if (netif_queue_stopped(ndev) &&
+ (skb_queue_len(&db->txq) < DM9051_TX_QUE_LO_WATER))
+ netif_wake_queue(ndev);
+ }
+
+ return ntx;
+}
+
+static irqreturn_t dm9051_rx_threaded_irq(int irq, void *pw)
+{
+ struct board_info *db = pw;
+ int result, result_tx;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_disable_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ result = dm9051_clear_interrupt(db);
+ if (result)
+ goto out_unlock;
+
+ do {
+ result = dm9051_loop_rx(db); /* threaded irq rx */
+ if (result < 0)
+ goto out_unlock;
+ result_tx = dm9051_loop_tx(db); /* more tx better performance */
+ if (result_tx < 0)
+ goto out_unlock;
+ } while (result > 0);
+
+ dm9051_enable_interrupt(db);
+
+	/* exit with the mutex unlocked on rx or tx error
+	 */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+
+ return IRQ_HANDLED;
+}
+
+static void dm9051_tx_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, tx_work);
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_loop_tx(db);
+ if (result < 0)
+ netdev_err(db->ndev, "transmit packet error\n");
+
+ mutex_unlock(&db->spi_lockm);
+}
+
+static void dm9051_rxctl_delay(struct work_struct *work)
+{
+ struct board_info *db = container_of(work, struct board_info, rxctrl_work);
+ struct net_device *ndev = db->ndev;
+ int result;
+
+ mutex_lock(&db->spi_lockm);
+
+ result = dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+ if (result < 0)
+ goto out_unlock;
+
+ dm9051_set_recv(db);
+
+	/* unlock the mutex and return if a regmap call fails
+	 */
+out_unlock:
+ mutex_unlock(&db->spi_lockm);
+}
+
+/* Open network device
+ * Called when the network device is marked active, such as a user executing
+ * 'ifconfig up' on the device
+ */
+static int dm9051_open(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->imr_all = IMR_PAR | IMR_PRM;
+ db->lcr_all = LMCR_MODE1;
+ db->rctl.rcr_all = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ memset(db->rctl.hash_table, 0, sizeof(db->rctl.hash_table));
+
+ ndev->irq = spi->irq; /* by dts */
+ ret = request_threaded_irq(spi->irq, NULL, dm9051_rx_threaded_irq,
+ dm9051_irq_flag(db) | IRQF_ONESHOT,
+ ndev->name, db);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get irq\n");
+ return ret;
+ }
+
+ phy_support_sym_pause(db->phydev);
+ phy_start(db->phydev);
+
+ /* flow control parameters init */
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ db->pause.autoneg = AUTONEG_DISABLE;
+
+ if (db->phydev->autoneg)
+ db->pause.autoneg = AUTONEG_ENABLE;
+
+ ret = dm9051_all_start(db);
+ if (ret) {
+ phy_stop(db->phydev);
+ free_irq(spi->irq, db);
+ return ret;
+ }
+
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
+/* Close network device
+ * Called to close down a network device which has been active. Cancel any
+ * work, shutdown the RX and TX process and then place the chip into a low
+ * power state while it is not being used
+ */
+static int dm9051_stop(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = dm9051_all_stop(db);
+ if (ret)
+ return ret;
+
+ flush_work(&db->tx_work);
+ flush_work(&db->rxctrl_work);
+
+ phy_stop(db->phydev);
+
+ free_irq(db->spidev->irq, db);
+
+ netif_stop_queue(ndev);
+
+ skb_queue_purge(&db->txq);
+
+ return 0;
+}
+
+/* event: queue the packet for transmit and schedule the tx worker
+ */
+static netdev_tx_t dm9051_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ skb_queue_tail(&db->txq, skb);
+ if (skb_queue_len(&db->txq) > DM9051_TX_QUE_HI_WATER)
+ netif_stop_queue(ndev); /* enforce limit queue size */
+
+ schedule_work(&db->tx_work);
+
+ return NETDEV_TX_OK;
+}
+
+/* event: update the rx filter and schedule the rx-control worker if needed
+ */
+static void dm9051_set_rx_mode(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ struct dm9051_rxctrl rxctrl;
+ struct netdev_hw_addr *ha;
+ u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
+ u32 hash_val;
+
+ memset(&rxctrl, 0, sizeof(rxctrl));
+
+ /* rx control */
+ if (ndev->flags & IFF_PROMISC) {
+ rcr |= RCR_PRMSC;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_PRMSC, rcr= %02x\n", rcr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ rcr |= RCR_ALL;
+ netdev_dbg(ndev, "set_multicast rcr |= RCR_ALLMULTI, rcr= %02x\n", rcr);
+ }
+
+ rxctrl.rcr_all = rcr;
+
+ /* broadcast address */
+ rxctrl.hash_table[0] = 0;
+ rxctrl.hash_table[1] = 0;
+ rxctrl.hash_table[2] = 0;
+ rxctrl.hash_table[3] = 0x8000;
+
+ /* the multicast address in Hash Table : 64 bits */
+ netdev_for_each_mc_addr(ha, ndev) {
+ hash_val = ether_crc_le(ETH_ALEN, ha->addr) & GENMASK(5, 0);
+ rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);
+ }
+
+ /* schedule work to do the actual set of the data if needed */
+
+ if (memcmp(&db->rctl, &rxctrl, sizeof(rxctrl))) {
+ memcpy(&db->rctl, &rxctrl, sizeof(rxctrl));
+ schedule_work(&db->rxctrl_work);
+ }
+}
+
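To make the hard-coded seed value above concrete: the 6-bit CRC hash selects one of 64 bits spread across four 16-bit words, with word = hash / 16 and bit = hash % 16. Under this scheme the broadcast address hashes to bit 63 (the same convention as other Davicom drivers, stated here as an assumption), which is exactly the 0x8000 in hash_table[3]:

	struct dm9051_rxctrl rxctrl = {};
	u32 hash_val = 63;			/* broadcast */

	/* word 63 / 16 == 3, bit 63 % 16 == 15 -> hash_table[3] |= 0x8000 */
	rxctrl.hash_table[hash_val / 16] |= BIT(0) << (hash_val % 16);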
+/* event: write into the mac registers and eeprom directly
+ */
+static int dm9051_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(ndev, p);
+ return dm9051_set_regs(db, DM9051_PAR, ndev->dev_addr, sizeof(ndev->dev_addr));
+}
+
+static const struct net_device_ops dm9051_netdev_ops = {
+ .ndo_open = dm9051_open,
+ .ndo_stop = dm9051_stop,
+ .ndo_start_xmit = dm9051_start_xmit,
+ .ndo_set_rx_mode = dm9051_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = dm9051_set_mac_address,
+};
+
+static void dm9051_operation_clear(struct board_info *db)
+{
+ db->bc.status_err_counter = 0;
+ db->bc.large_err_counter = 0;
+ db->bc.rx_err_counter = 0;
+ db->bc.tx_err_counter = 0;
+ db->bc.fifo_rst_counter = 0;
+}
+
+static int dm9051_mdio_register(struct board_info *db)
+{
+ struct spi_device *spi = db->spidev;
+ int ret;
+
+ db->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!db->mdiobus)
+ return -ENOMEM;
+
+ db->mdiobus->priv = db;
+ db->mdiobus->read = dm9051_mdio_read;
+ db->mdiobus->write = dm9051_mdio_write;
+ db->mdiobus->name = "dm9051-mdiobus";
+ db->mdiobus->phy_mask = (u32)~BIT(1);
+ db->mdiobus->parent = &spi->dev;
+ snprintf(db->mdiobus->id, MII_BUS_ID_SIZE,
+ "dm9051-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, db->mdiobus);
+ if (ret)
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+
+ return ret;
+}
+
+static void dm9051_handle_link_change(struct net_device *ndev)
+{
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_print_status(db->phydev);
+
+	/* only write pause settings to the mac; since mac and phy are
+	 * integrated, link state, speed and duplex are already in sync
+ */
+ if (db->phydev->link) {
+ if (db->phydev->pause) {
+ db->pause.rx_pause = true;
+ db->pause.tx_pause = true;
+ }
+ dm9051_update_fcr(db);
+ }
+}
+
+/* connect the phy in poll mode
+ */
+static int dm9051_phy_connect(struct board_info *db)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+ db->mdiobus->id, DM9051_PHY_ADDR);
+
+ db->phydev = phy_connect(db->ndev, phy_id, dm9051_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+	return PTR_ERR_OR_ZERO(db->phydev);
+}
+
+static int dm9051_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev;
+ struct board_info *db;
+ int ret;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(struct board_info));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
+
+ db = netdev_priv(ndev);
+
+ db->msg_enable = 0;
+ db->spidev = spi;
+ db->ndev = ndev;
+
+ ndev->netdev_ops = &dm9051_netdev_ops;
+ ndev->ethtool_ops = &dm9051_ethtool_ops;
+
+ mutex_init(&db->spi_lockm);
+ mutex_init(&db->reg_mutex);
+
+ INIT_WORK(&db->rxctrl_work, dm9051_rxctl_delay);
+ INIT_WORK(&db->tx_work, dm9051_tx_delay);
+
+ ret = dm9051_map_init(spi, db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_chipid(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_map_etherdev_par(ndev, db);
+ if (ret < 0)
+ return ret;
+
+ ret = dm9051_mdio_register(db);
+ if (ret)
+ return ret;
+
+ ret = dm9051_phy_connect(db);
+ if (ret)
+ return ret;
+
+ dm9051_operation_clear(db);
+ skb_queue_head_init(&db->txq);
+
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ phy_disconnect(db->phydev);
+ return dev_err_probe(dev, ret, "device register failed");
+ }
+
+ return 0;
+}
+
+static int dm9051_drv_remove(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct board_info *db = to_dm9051_board(ndev);
+
+ phy_disconnect(db->phydev);
+
+ return 0;
+}
+
+static const struct of_device_id dm9051_match_table[] = {
+ { .compatible = "davicom,dm9051" },
+ {}
+};
+
+static const struct spi_device_id dm9051_id_table[] = {
+ { "dm9051", 0 },
+ {}
+};
+
+static struct spi_driver dm9051_driver = {
+ .driver = {
+ .name = DRVNAME_9051,
+ .of_match_table = dm9051_match_table,
+ },
+ .probe = dm9051_probe,
+ .remove = dm9051_drv_remove,
+ .id_table = dm9051_id_table,
+};
+module_spi_driver(dm9051_driver);
+
+MODULE_AUTHOR("Joseph CHANG <joseph_chang@davicom.com.tw>");
+MODULE_DESCRIPTION("Davicom DM9051 network SPI driver");
+MODULE_LICENSE("GPL");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Davicom Semiconductor, Inc.
+ * Davicom DM9051 SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _DM9051_H_
+#define _DM9051_H_
+
+#include <linux/bits.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#define DM9051_ID 0x9051
+
+#define DM9051_NCR 0x00
+#define DM9051_NSR 0x01
+#define DM9051_TCR 0x02
+#define DM9051_RCR 0x05
+#define DM9051_BPTR 0x08
+#define DM9051_FCR 0x0A
+#define DM9051_EPCR 0x0B
+#define DM9051_EPAR 0x0C
+#define DM9051_EPDRL 0x0D
+#define DM9051_EPDRH 0x0E
+#define DM9051_PAR 0x10
+#define DM9051_MAR 0x16
+#define DM9051_GPCR 0x1E
+#define DM9051_GPR 0x1F
+
+#define DM9051_VIDL 0x28
+#define DM9051_VIDH 0x29
+#define DM9051_PIDL 0x2A
+#define DM9051_PIDH 0x2B
+#define DM9051_SMCR 0x2F
+#define DM9051_ATCR 0x30
+#define DM9051_SPIBCR 0x38
+#define DM9051_INTCR 0x39
+#define DM9051_PPCR 0x3D
+
+#define DM9051_MPCR 0x55
+#define DM9051_LMCR 0x57
+#define DM9051_MBNDRY 0x5E
+
+#define DM9051_MRRL 0x74
+#define DM9051_MRRH 0x75
+#define DM9051_MWRL 0x7A
+#define DM9051_MWRH 0x7B
+#define DM9051_TXPLL 0x7C
+#define DM9051_TXPLH 0x7D
+#define DM9051_ISR 0x7E
+#define DM9051_IMR 0x7F
+
+#define DM_SPI_MRCMDX 0x70
+#define DM_SPI_MRCMD 0x72
+#define DM_SPI_MWCMD 0x78
+
+#define DM_SPI_WR 0x80
+
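As a reading aid (an assumption based on how regmap-spi applies write_flag_mask, not text from the patch): with .reg_bits = 8 and .write_flag_mask = DM_SPI_WR in the configs above, the first byte on the wire carries the direction bit, e.g.:

	unsigned int val;

	/* read of DM9051_NSR (0x01): command byte 0x01, then data in */
	regmap_read(db->regmap_dm, DM9051_NSR, &val);
	/* write of DM9051_NCR (0x00): command byte 0x00 | 0x80 = 0x80 */
	regmap_write(db->regmap_dm, DM9051_NCR, NCR_RST);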
+/* dm9051 Ethernet controller registers bits
+ */
+/* 0x00 */
+#define NCR_WAKEEN BIT(6)
+#define NCR_FDX BIT(3)
+#define NCR_RST BIT(0)
+/* 0x01 */
+#define NSR_SPEED BIT(7)
+#define NSR_LINKST BIT(6)
+#define NSR_WAKEST BIT(5)
+#define NSR_TX2END BIT(3)
+#define NSR_TX1END BIT(2)
+/* 0x02 */
+#define TCR_DIS_JABBER_TIMER BIT(6) /* for Jabber Packet support */
+#define TCR_TXREQ BIT(0)
+/* 0x05 */
+#define RCR_DIS_WATCHDOG_TIMER BIT(6) /* for Jabber Packet support */
+#define RCR_DIS_LONG BIT(5)
+#define RCR_DIS_CRC BIT(4)
+#define RCR_ALL BIT(3)
+#define RCR_PRMSC BIT(1)
+#define RCR_RXEN BIT(0)
+#define RCR_RX_DISABLE (RCR_DIS_LONG | RCR_DIS_CRC)
+/* 0x06 */
+#define RSR_RF BIT(7)
+#define RSR_MF BIT(6)
+#define RSR_LCS BIT(5)
+#define RSR_RWTO BIT(4)
+#define RSR_PLE BIT(3)
+#define RSR_AE BIT(2)
+#define RSR_CE BIT(1)
+#define RSR_FOE BIT(0)
+#define RSR_ERR_BITS (RSR_RF | RSR_LCS | RSR_RWTO | RSR_PLE | \
+ RSR_AE | RSR_CE | RSR_FOE)
+/* 0x0A */
+#define FCR_TXPEN BIT(5)
+#define FCR_BKPM BIT(3)
+#define FCR_FLCE BIT(0)
+#define FCR_RXTX_BITS (FCR_TXPEN | FCR_BKPM | FCR_FLCE)
+/* 0x0B */
+#define EPCR_WEP BIT(4)
+#define EPCR_EPOS BIT(3)
+#define EPCR_ERPRR BIT(2)
+#define EPCR_ERPRW BIT(1)
+#define EPCR_ERRE BIT(0)
+/* 0x1E */
+#define GPCR_GEP_CNTL BIT(0)
+/* 0x1F */
+#define GPR_PHY_OFF BIT(0)
+/* 0x30 */
+#define ATCR_AUTO_TX BIT(7)
+/* 0x39 */
+#define INTCR_POL_LOW (1 << 0)
+#define INTCR_POL_HIGH (0 << 0)
+/* 0x3D */
+/* Pause Packet Control Register - default = 1 */
+#define PPCR_PAUSE_COUNT 0x08
+/* 0x55 */
+#define MPCR_RSTTX BIT(1)
+#define MPCR_RSTRX BIT(0)
+/* 0x57 */
+/* LEDMode Control Register - LEDMode1 */
+/* Value 0x81 : bit[7] = 1, bit[2] = 0, bit[1:0] = 01b */
+#define LMCR_NEWMOD BIT(7)
+#define LMCR_TYPED1 BIT(1)
+#define LMCR_TYPED0 BIT(0)
+#define LMCR_MODE1 (LMCR_NEWMOD | LMCR_TYPED0)
+/* 0x5E */
+#define MBNDRY_BYTE BIT(7)
+/* 0xFE */
+#define ISR_MBS BIT(7)
+#define ISR_LNKCHG BIT(5)
+#define ISR_ROOS BIT(3)
+#define ISR_ROS BIT(2)
+#define ISR_PTS BIT(1)
+#define ISR_PRS BIT(0)
+#define ISR_CLR_INT (ISR_LNKCHG | ISR_ROOS | ISR_ROS | \
+ ISR_PTS | ISR_PRS)
+#define ISR_STOP_MRCMD (ISR_MBS)
+/* 0xFF */
+#define IMR_PAR BIT(7)
+#define IMR_LNKCHGI BIT(5)
+#define IMR_PTM BIT(1)
+#define IMR_PRM BIT(0)
+
+/* Constants
+ */
+#define DM9051_PHY_ADDR 1 /* PHY id */
+#define DM9051_PHY 0x40 /* PHY address 0x01 */
+#define DM9051_PKT_RDY 0x01 /* Packet ready to receive */
+#define DM9051_PKT_MAX 1536 /* Received packet max size */
+#define DM9051_TX_QUE_HI_WATER 50
+#define DM9051_TX_QUE_LO_WATER 25
+#define DM_EEPROM_MAGIC 0x9051
+
+#define DM_RXHDR_SIZE sizeof(struct dm9051_rxhdr)
+
+static inline struct board_info *to_dm9051_board(struct net_device *ndev)
+{
+ return netdev_priv(ndev);
+}
+
+#endif /* _DM9051_H_ */
/* Setup the SG entry for the header */
dpaa2_sg_set_addr(sgt, tso_hdr_dma);
dpaa2_sg_set_len(sgt, hdr_len);
- dpaa2_sg_set_final(sgt, data_left > 0 ? false : true);
+ dpaa2_sg_set_final(sgt, data_left <= 0);
/* Compose the SG entries for each fragment of data */
num_sge = 1;
}
dpaa2_sg_set_addr(sgt, addr);
dpaa2_sg_set_len(sgt, size);
- dpaa2_sg_set_final(sgt, size == data_left ? true : false);
+ dpaa2_sg_set_final(sgt, size == data_left);
num_sge++;
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+
+ unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
- unregister_netdev(net_dev);
-
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
int i, j;
int err;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return 0;
priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int i;
- /* Raw addressing means no QPLs */
- if (priv->queue_format == GVE_GQI_RDA_FORMAT)
+ if (num_qpls == 0)
return;
kvfree(priv->qpl_cfg.qpl_id_map);
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
+ skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
return tqp->index;
}
-static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
-{
- return !!hdev->reset_pending;
-}
-
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
ICE_FLAG_FD_ENA,
ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
ICE_FLAG_PTP, /* PTP is enabled by software */
- ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
ICE_FLAG_CLS_FLOWER,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
ICE_PF_FLAGS_NBITS /* must be last */
};
{
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
{
ice_unplug_aux_dev(pf);
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
}
#endif /* _ICE_H_ */
!ice_fw_supports_report_dflt_cfg(hw)) {
struct ice_link_default_override_tlv tlv;
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (!ice_is_rdma_ena(pf))
return -EINVAL;
vsi = ice_get_main_vsi(pf);
*/
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
int index;
index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
/* if this PF doesn't support a technology that requires auxiliary
* devices, then gracefully exit
*/
- if (!ice_is_aux_ena(pf))
+ if (!ice_is_rdma_ena(pf))
return 0;
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}
+/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+	/* check that this event is for this netdev and that we are
+	 * in an aggregate
+	 */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
}
/**
- * ice_is_aux_ena
+ * ice_is_rdma_ena
* @pf: pointer to the PF struct
*
- * returns true if AUX devices/drivers are supported, false otherwise
+ * returns true if RDMA is currently supported, false otherwise
*/
-bool ice_is_aux_ena(struct ice_pf *pf)
+bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
/**
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-bool ice_is_aux_ena(struct ice_pf *pf);
+bool ice_is_rdma_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
return;
}
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
- if (func_caps->common_cap.rdma) {
+ if (func_caps->common_cap.rdma)
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
- set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
v_left -= needed;
/* reserve vectors for RDMA auxiliary driver */
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
int v_remain = v_actual - v_other;
int v_rdma = 0, v_min_rdma = 0;
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ if (ice_is_rdma_ena(pf)) {
/* Need at least 1 interrupt in addition to
* AEQ MSIX
*/
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
- if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ if (ice_is_rdma_ena(pf))
dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
pf->num_rdma_msix);
}
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_aux_ena(pf)) {
+ if (ice_is_rdma_ena(pf)) {
pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
if (pf->aux_idx < 0) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
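The helper substitutions in this hunk are straight identities from include/linux/skbuff.h, worth spelling out since the pointer math is easy to misread:

	/* skb_network_offset(skb)
	 *	== skb_network_header(skb) - skb->data
	 * skb_network_header_len(skb)
	 *	== skb_transport_header(skb) - skb_network_header(skb)
	 * skb_inner_network_header_len(skb)
	 *	== skb_inner_transport_header(skb) - skb_inner_network_header(skb)
	 */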
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
return 0;
}
-static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id, u16 pfc_en,
+static void rpm_cfg_pfc_quanta_thresh(rpm_t *rpm, int lmac_id,
+ unsigned long pfc_en,
bool enable)
{
u64 quanta_offset = 0, quanta_thresh = 0, cfg;
int i, shift;
/* Set pause time and interval */
- for_each_set_bit(i, (unsigned long *)&pfc_en, 16) {
+ for_each_set_bit(i, &pfc_en, 16) {
switch (i) {
case 0:
case 1:
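The parameter-widening fix above matters because for_each_set_bit() dereferences a full unsigned long; casting the address of a narrower u16 lets it read past the variable and breaks outright on big-endian machines. A minimal sketch of the safe pattern, with hypothetical names:

static void walk_pfc_classes(u16 pfc_en_hw)
{
	/* widen to unsigned long first; never cast a &u16 */
	unsigned long pfc_en = pfc_en_hw;
	int prio;

	for_each_set_bit(prio, &pfc_en, 16)
		pr_debug("PFC enabled for priority %d\n", prio);
}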
struct prestera_switch *sw;
struct list_head vr_list;
struct list_head rif_entry_list;
+ struct rhashtable fib_ht;
+ struct rhashtable kern_fib_cache_ht;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
+ struct notifier_block fib_nb;
};
struct prestera_rxtx_params {
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+void prestera_queue_work(struct work_struct *work);
+
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
struct prestera_acl_ruleset_ht_key {
struct prestera_flow_block *block;
+ u32 chain_index;
};
struct prestera_acl_rule_entry {
struct {
u8 valid:1;
} accept, drop, trap;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
struct {
u32 id;
struct prestera_counter_block *block;
refcount_t refcount;
void *keymask;
u32 vtcam_id;
+ u32 index;
u16 pcl_id;
bool offload;
};
.automatic_shrinking = true,
};
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client)
+{
+ u32 client_map[] = {
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_1,
+ PRESTERA_HW_COUNTER_CLIENT_LOOKUP_2
+ };
+
+	if (chain_index >= ARRAY_SIZE(client_map))
+ return -EINVAL;
+
+ *client = client_map[chain_index];
+ return 0;
+}
+
+static bool prestera_acl_chain_is_supported(u32 chain_index)
+{
+ return (chain_index & ~PRESTERA_ACL_CHAIN_MASK) == 0;
+}
+
static struct prestera_acl_ruleset *
prestera_acl_ruleset_create(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
u32 uid = 0;
int err;
+ if (!prestera_acl_chain_is_supported(chain_index))
+ return ERR_PTR(-EINVAL);
+
ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
if (!ruleset)
return ERR_PTR(-ENOMEM);
ruleset->acl = acl;
ruleset->ht_key.block = block;
+ ruleset->ht_key.chain_index = chain_index;
refcount_set(&ruleset->refcount, 1);
err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
goto err_ruleset_create;
/* make pcl-id based on uid */
- ruleset->pcl_id = (u8)uid;
+ ruleset->pcl_id = PRESTERA_ACL_PCL_ID_MAKE((u8)uid, chain_index);
+ ruleset->index = uid;
+
err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
if (err)
int prestera_acl_ruleset_offload(struct prestera_acl_ruleset *ruleset)
{
+ struct prestera_acl_iface iface;
u32 vtcam_id;
int err;
if (ruleset->offload)
return -EEXIST;
- err = prestera_acl_vtcam_id_get(ruleset->acl, 0,
+ err = prestera_acl_vtcam_id_get(ruleset->acl,
+ ruleset->ht_key.chain_index,
ruleset->keymask, &vtcam_id);
if (err)
- return err;
+ goto err_vtcam_create;
+
+ if (ruleset->ht_key.chain_index) {
+ /* for chain > 0, bind iface index to pcl-id to be able
+ * to jump from any other ruleset to this one using the index.
+ */
+ iface.index = ruleset->index;
+ iface.type = PRESTERA_ACL_IFACE_TYPE_INDEX;
+ err = prestera_hw_vtcam_iface_bind(ruleset->acl->sw, &iface,
+ vtcam_id, ruleset->pcl_id);
+ if (err)
+ goto err_ruleset_bind;
+ }
ruleset->vtcam_id = vtcam_id;
ruleset->offload = true;
return 0;
+
+err_ruleset_bind:
+ prestera_acl_vtcam_id_put(ruleset->acl, ruleset->vtcam_id);
+err_vtcam_create:
+ return err;
}
static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
{
struct prestera_acl *acl = ruleset->acl;
u8 uid = ruleset->pcl_id & PRESTERA_ACL_KEYMASK_PCL_ID_USER;
+ int err;
rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
prestera_acl_ruleset_ht_params);
- if (ruleset->offload)
+ if (ruleset->offload) {
+ if (ruleset->ht_key.chain_index) {
+ struct prestera_acl_iface iface = {
+ .type = PRESTERA_ACL_IFACE_TYPE_INDEX,
+ .index = ruleset->index
+ };
+ err = prestera_hw_vtcam_iface_unbind(acl->sw, &iface,
+ ruleset->vtcam_id);
+ WARN_ON(err);
+ }
WARN_ON(prestera_acl_vtcam_id_put(acl, ruleset->vtcam_id));
+ }
idr_remove(&acl->uid, uid);
-
rhashtable_destroy(&ruleset->rule_ht);
kfree(ruleset->keymask);
kfree(ruleset);
static struct prestera_acl_ruleset *
__prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset_ht_key ht_key;
memset(&ht_key, 0, sizeof(ht_key));
ht_key.block = block;
+ ht_key.chain_index = chain_index;
return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
prestera_acl_ruleset_ht_params);
}
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (!ruleset)
return ERR_PTR(-ENOENT);
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block)
+ struct prestera_flow_block *block,
+ u32 chain_index)
{
struct prestera_acl_ruleset *ruleset;
- ruleset = __prestera_acl_ruleset_lookup(acl, block);
+ ruleset = __prestera_acl_ruleset_lookup(acl, block, chain_index);
if (ruleset) {
refcount_inc(&ruleset->refcount);
return ruleset;
}
- return prestera_acl_ruleset_create(acl, block);
+ return prestera_acl_ruleset_create(acl, block, chain_index);
}
void prestera_acl_ruleset_put(struct prestera_acl_ruleset *ruleset)
prestera_acl_rule_ht_params);
}
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset)
+{
+ return ruleset->index;
+}
+
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset)
{
return ruleset->offload;
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie)
+ unsigned long cookie, u32 chain_index)
{
struct prestera_acl_rule *rule;
rule->ruleset = ruleset;
rule->cookie = cookie;
+ rule->chain_index = chain_index;
refcount_inc(&ruleset->refcount);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
{
+ if (rule->jump_ruleset)
+ /* release ruleset kept by jump action */
+ prestera_acl_ruleset_put(rule->jump_ruleset);
+
prestera_acl_ruleset_put(rule->ruleset);
kfree(rule);
}
/* setup counter */
rule->re_arg.count.valid = true;
- rule->re_arg.count.client = PRESTERA_HW_COUNTER_CLIENT_LOOKUP_0;
+ err = prestera_acl_chain_to_client(ruleset->ht_key.chain_index,
+ &rule->re_arg.count.client);
+ if (err)
+ goto err_rule_add;
rule->re = prestera_acl_rule_entry_find(sw->acl, &rule->re_key);
err = WARN_ON(rule->re) ? -EEXIST : 0;
if (err)
goto err_rule_add;
- /* bind the block (all ports) to chain index 0 */
- if (!ruleset->rule_count) {
+	/* bind the block (all ports) to chain index 0; the remaining
+	 * chains are reached via the goto (jump) action
+ */
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count) {
err = prestera_acl_ruleset_block_bind(ruleset, block);
if (err)
goto err_acl_block_bind;
prestera_acl_rule_entry_destroy(sw->acl, rule->re);
/* unbind block (all ports) */
- if (!ruleset->rule_count)
+ if (!ruleset->ht_key.chain_index && !ruleset->rule_count)
prestera_acl_ruleset_block_unbind(ruleset, block);
}
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_TRAP;
act_num++;
}
+ /* jump */
+ if (e->jump.valid) {
+ act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_JUMP;
+ act_hw[act_num].jump = e->jump.i;
+ act_num++;
+ }
/* counter */
if (e->counter.block) {
act_hw[act_num].id = PRESTERA_ACL_RULE_ACTION_COUNT;
e->drop.valid = arg->drop.valid;
/* trap */
e->trap.valid = arg->trap.valid;
+ /* jump */
+ e->jump.valid = arg->jump.valid;
+ e->jump.i = arg->jump.i;
/* counter */
if (arg->count.valid) {
int err;
#define PRESTERA_ACL_KEYMASK_PCL_ID 0x3FF
#define PRESTERA_ACL_KEYMASK_PCL_ID_USER \
(PRESTERA_ACL_KEYMASK_PCL_ID & 0x00FF)
+#define PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN \
+ (PRESTERA_ACL_KEYMASK_PCL_ID & 0xFF00)
+#define PRESTERA_ACL_CHAIN_MASK \
+ (PRESTERA_ACL_KEYMASK_PCL_ID >> 8)
+
+#define PRESTERA_ACL_PCL_ID_MAKE(uid, chain_id) \
+ (((uid) & PRESTERA_ACL_KEYMASK_PCL_ID_USER) | \
+ (((chain_id) << 8) & PRESTERA_ACL_KEYMASK_PCL_ID_CHAIN))
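Worked example (hypothetical values): with PRESTERA_ACL_KEYMASK_PCL_ID = 0x3FF, the user id occupies bits 7:0 and the chain index bits 9:8 of the 10-bit pcl-id, so uid 0x12 on chain 2 packs as:

	u16 pcl_id = PRESTERA_ACL_PCL_ID_MAKE(0x12, 2);
	/* (0x12 & 0xFF) | ((2 << 8) & 0x300) == 0x212 */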
#define rule_match_set_n(match_p, type, val_p, size) \
memcpy(&(match_p)[PRESTERA_ACL_RULE_MATCH_TYPE_##type], \
PRESTERA_ACL_RULE_ACTION_ACCEPT = 0,
PRESTERA_ACL_RULE_ACTION_DROP = 1,
PRESTERA_ACL_RULE_ACTION_TRAP = 2,
+ PRESTERA_ACL_RULE_ACTION_JUMP = 5,
PRESTERA_ACL_RULE_ACTION_COUNT = 7,
PRESTERA_ACL_RULE_ACTION_MAX
__be32 mask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
};
+struct prestera_acl_action_jump {
+ u32 index;
+};
+
struct prestera_acl_action_count {
u32 id;
};
enum prestera_acl_rule_action id;
union {
struct prestera_acl_action_count count;
+ struct prestera_acl_action_jump jump;
};
};
struct {
u8 valid:1;
} accept, drop, trap;
+ struct {
+ struct prestera_acl_action_jump i;
+ u8 valid:1;
+ } jump;
struct {
u8 valid:1;
u32 client;
struct rhash_head ht_node; /* Member of acl HT */
struct list_head list;
struct prestera_acl_ruleset *ruleset;
+ struct prestera_acl_ruleset *jump_ruleset;
unsigned long cookie;
+ u32 chain_index;
u32 priority;
struct prestera_acl_rule_entry_key re_key;
struct prestera_acl_rule_entry_arg re_arg;
struct prestera_acl_rule *
prestera_acl_rule_create(struct prestera_acl_ruleset *ruleset,
- unsigned long cookie);
+ unsigned long cookie, u32 chain_index);
void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
u32 priority);
void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
struct prestera_acl_rule_entry_arg *arg);
struct prestera_acl_ruleset *
prestera_acl_ruleset_get(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
struct prestera_acl_ruleset *
prestera_acl_ruleset_lookup(struct prestera_acl *acl,
- struct prestera_flow_block *block);
+ struct prestera_flow_block *block,
+ u32 chain_index);
void prestera_acl_ruleset_keymask_set(struct prestera_acl_ruleset *ruleset,
void *keymask);
bool prestera_acl_ruleset_is_offload(struct prestera_acl_ruleset *ruleset);
struct prestera_port *port);
int prestera_acl_ruleset_unbind(struct prestera_acl_ruleset *ruleset,
struct prestera_port *port);
+u32 prestera_acl_ruleset_index_get(const struct prestera_acl_ruleset *ruleset);
void
prestera_acl_rule_keymask_pcl_id_set(struct prestera_acl_rule *rule,
u16 pcl_id);
int prestera_acl_vtcam_id_get(struct prestera_acl *acl, u8 lookup,
void *keymask, u32 *vtcam_id);
int prestera_acl_vtcam_id_put(struct prestera_acl *acl, u32 vtcam_id);
+int prestera_acl_chain_to_client(u32 chain_index, u32 *client);
#endif /* _PRESTERA_ACL_H_ */
static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
struct flow_cls_offload *f)
{
- if (f->common.chain_index != 0)
- return -EOPNOTSUPP;
-
switch (f->command) {
case FLOW_CLS_REPLACE:
return prestera_flower_replace(block, f);
prestera_flower_template_cleanup(block);
+ WARN_ON(!list_empty(&block->template_list));
WARN_ON(!list_empty(&block->binding_list));
kfree(block);
return NULL;
INIT_LIST_HEAD(&block->binding_list);
+ INIT_LIST_HEAD(&block->template_list);
block->net = net;
block->sw = sw;
struct prestera_port;
struct prestera_switch;
-struct prestera_flower_template;
struct prestera_flow_block_binding {
struct list_head list;
struct net *net;
struct prestera_acl_ruleset *ruleset_zero;
struct flow_block_cb *block_cb;
- struct prestera_flower_template *tmplt;
+ struct list_head template_list;
unsigned int rule_count;
};
struct prestera_flower_template {
struct prestera_acl_ruleset *ruleset;
+ struct list_head list;
+ u32 chain_index;
};
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
- if (block->tmplt) {
- /* put the reference to the ruleset kept in create */
- prestera_acl_ruleset_put(block->tmplt->ruleset);
- kfree(block->tmplt);
- block->tmplt = NULL;
- return;
+ struct prestera_flower_template *template;
+ struct list_head *pos, *n;
+
+	/* drop the references to all rulesets taken at template create */
+ list_for_each_safe(pos, n, &block->template_list) {
+ template = list_entry(pos, typeof(*template), list);
+ prestera_acl_ruleset_put(template->ruleset);
+ list_del(&template->list);
+ kfree(template);
}
}
+static int
+prestera_flower_parse_goto_action(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ u32 chain_index,
+ const struct flow_action_entry *act)
+{
+ struct prestera_acl_ruleset *ruleset;
+
+ if (act->chain_index <= chain_index)
+ /* we can jump only forward */
+ return -EINVAL;
+
+ if (rule->re_arg.jump.valid)
+ return -EEXIST;
+
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ act->chain_index);
+ if (IS_ERR(ruleset))
+ return PTR_ERR(ruleset);
+
+ rule->re_arg.jump.valid = 1;
+ rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);
+
+ rule->jump_ruleset = ruleset;
+
+ return 0;
+}
+
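With this parser, a chain jump offloads the usual tc pattern, e.g.
"tc filter add dev swp1 ingress chain 0 flower ... action goto chain 1"
(illustrative command; device names vary). Only forward jumps are accepted,
and the target ruleset stays pinned via rule->jump_ruleset until the rule
is destroyed.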
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
struct prestera_acl_rule *rule,
struct flow_action *flow_action,
+ u32 chain_index,
struct netlink_ext_ack *extack)
{
const struct flow_action_entry *act;
- int i;
+ int err, i;
/* whole struct (rule->re_arg) must be initialized with 0 */
if (!flow_action_has_entries(flow_action))
rule->re_arg.trap.valid = 1;
break;
+ case FLOW_ACTION_GOTO:
+ err = prestera_flower_parse_goto_action(block, rule,
+ chain_index,
+ act);
+ if (err)
+ return err;
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
pr_err("Unsupported action\n");
}
return prestera_flower_parse_actions(block, rule, &f->rule->action,
+ f->common.chain_index,
f->common.extack);
}
struct prestera_acl_rule *rule;
int err;
- ruleset = prestera_acl_ruleset_get(acl, block);
+ ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
/* increments the ruleset reference */
- rule = prestera_acl_rule_create(ruleset, f->cookie);
+ rule = prestera_acl_rule_create(ruleset, f->cookie,
+ f->common.chain_index);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
goto err_rule_create;
struct prestera_acl_ruleset *ruleset;
struct prestera_acl_rule *rule;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return;
}
prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
- ruleset = prestera_acl_ruleset_get(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR_OR_NULL(ruleset)) {
err = -EINVAL;
goto err_ruleset_get;
/* keep the reference to the ruleset */
template->ruleset = ruleset;
- block->tmplt = template;
+ template->chain_index = f->common.chain_index;
+ list_add_rcu(&template->list, &block->template_list);
return 0;
err_ruleset_get:
u64 bytes;
int err;
- ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block);
+ ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
+ f->common.chain_index);
if (IS_ERR(ruleset))
return PTR_ERR(ruleset);
#include <net/pkt_cls.h>
-struct prestera_switch;
struct prestera_flow_block;
int prestera_flower_replace(struct prestera_flow_block *block,
PRESTERA_CMD_TYPE_ROUTER_RIF_CREATE = 0x600,
PRESTERA_CMD_TYPE_ROUTER_RIF_DELETE = 0x601,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_ADD = 0x610,
+ PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE = 0x611,
PRESTERA_CMD_TYPE_ROUTER_VR_CREATE = 0x630,
PRESTERA_CMD_TYPE_ROUTER_VR_DELETE = 0x631,
__le32 id;
__le32 __reserved;
union {
+ struct {
+ __le32 index;
+ } jump;
struct {
__le32 id;
} count;
u8 __pad[3];
};
+struct prestera_msg_ip_addr {
+ union {
+ __be32 ipv4;
+ __be32 ipv6[4];
+ } u;
+ u8 v; /* e.g. PRESTERA_IPV4 */
+ u8 __pad[3];
+};
+
struct prestera_msg_rif_req {
struct prestera_msg_cmd cmd;
struct prestera_msg_iface iif;
u8 __pad[2];
};
+struct prestera_msg_lpm_req {
+ struct prestera_msg_cmd cmd;
+ struct prestera_msg_ip_addr dst;
+ __le32 grp_id;
+ __le32 dst_len;
+ __le16 vr_id;
+ u8 __pad[2];
+};
+
struct prestera_msg_vr_req {
struct prestera_msg_cmd cmd;
__le16 vr_id;
BUILD_BUG_ON(sizeof(struct prestera_msg_counter_stats) != 16);
BUILD_BUG_ON(sizeof(struct prestera_msg_rif_req) != 36);
BUILD_BUG_ON(sizeof(struct prestera_msg_vr_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lpm_req) != 36);
/* structure that are part of req/resp fw messages */
BUILD_BUG_ON(sizeof(struct prestera_msg_iface) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_ip_addr) != 20);
/* check responses */
BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
case PRESTERA_ACL_RULE_ACTION_TRAP:
/* just rule action id, no specific data */
break;
+ case PRESTERA_ACL_RULE_ACTION_JUMP:
+ action->jump.index = __cpu_to_le32(info->jump.index);
+ break;
case PRESTERA_ACL_RULE_ACTION_COUNT:
action->count.id = __cpu_to_le32(info->count.id);
break;
sizeof(req));
}
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .grp_id = __cpu_to_le32(grp_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_ADD, &req.cmd,
+ sizeof(req));
+}
+
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len)
+{
+ struct prestera_msg_lpm_req req = {
+ .dst_len = __cpu_to_le32(dst_len),
+ .vr_id = __cpu_to_le16(vr_id),
+ .dst.u.ipv4 = dst
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ROUTER_LPM_DELETE, &req.cmd,
+ sizeof(req));
+}
+
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params)
{
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id);
int prestera_hw_vr_delete(struct prestera_switch *sw, u16 vr_id);
+/* LPM PI */
+int prestera_hw_lpm_add(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len, u32 grp_id);
+int prestera_hw_lpm_del(struct prestera_switch *sw, u16 vr_id,
+ __be32 dst, u32 dst_len);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
#define PRESTERA_MAC_ADDR_NUM_MAX 255
static struct workqueue_struct *prestera_wq;
+static struct workqueue_struct *prestera_owq;
+
+void prestera_queue_work(struct work_struct *work)
+{
+ queue_work(prestera_owq, work);
+}
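Note that an ordered workqueue runs at most one item at a time, in queueing
order, so FIB REPLACE/DEL notifications deferred from the atomic notifier
later in this patch reach the hardware in the same order the kernel emitted
them.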
int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
{
if (!prestera_wq)
return -ENOMEM;
+ prestera_owq = alloc_ordered_workqueue("prestera_ordered", 0);
+ if (!prestera_owq)
+ return -ENOMEM;
+
return 0;
}
static void __exit prestera_module_exit(void)
{
destroy_workqueue(prestera_wq);
+ destroy_workqueue(prestera_owq);
}
module_init(prestera_module_init);
#include <linux/types.h>
#include <linux/inetdevice.h>
#include <net/switchdev.h>
+#include <linux/rhashtable.h>
#include "prestera.h"
#include "prestera_router_hw.h"
+struct prestera_kern_fib_cache_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 kern_tb_id; /* tb_id from kernel (not fixed) */
+};
+
+/* Subscribing on neighbours in kernel */
+struct prestera_kern_fib_cache {
+ struct prestera_kern_fib_cache_key key;
+ struct {
+ struct prestera_fib_key fib_key;
+ enum prestera_fib_type fib_type;
+	} lpm_info; /* holds the prepared lpm info */
+	struct rhash_head ht_node; /* node of prestera_router */
+	struct fib_info *fi;
+	u8 kern_tos;
+	u8 kern_type;
+	bool reachable; /* set when the route is not overlapped by another table */
+};
+
+static const struct rhashtable_params __prestera_kern_fib_cache_ht_params = {
+ .key_offset = offsetof(struct prestera_kern_fib_cache, key),
+ .head_offset = offsetof(struct prestera_kern_fib_cache, ht_node),
+ .key_len = sizeof(struct prestera_kern_fib_cache_key),
+ .automatic_shrinking = true,
+};
+
/* This util is used to convert kernel rules for the default vr into hw_vr */
static u32 prestera_fix_tb_id(u32 tb_id)
{
return tb_id;
}
+static void
+prestera_util_fen_info2fib_cache_key(struct fib_entry_notifier_info *fen_info,
+ struct prestera_kern_fib_cache_key *key)
+{
+ memset(key, 0, sizeof(*key));
+ key->addr.u.ipv4 = cpu_to_be32(fen_info->dst);
+ key->prefix_len = fen_info->dst_len;
+ key->kern_tb_id = fen_info->tb_id;
+}
+
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_find(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+
+ fib_cache =
+ rhashtable_lookup_fast(&sw->router->kern_fib_cache_ht, key,
+ __prestera_kern_fib_cache_ht_params);
+ return IS_ERR(fib_cache) ? NULL : fib_cache;
+}
+
+static void
+prestera_kern_fib_cache_destroy(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fib_cache)
+{
+ fib_info_put(fib_cache->fi);
+ rhashtable_remove_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ kfree(fib_cache);
+}
+
+/* Operations on fi (offload, etc) must be wrapped in utils.
+ * This function just creates storage.
+ */
+static struct prestera_kern_fib_cache *
+prestera_kern_fib_cache_create(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache_key *key,
+ struct fib_info *fi, u8 tos, u8 type)
+{
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ fib_cache = kzalloc(sizeof(*fib_cache), GFP_KERNEL);
+ if (!fib_cache)
+ goto err_kzalloc;
+
+ memcpy(&fib_cache->key, key, sizeof(*key));
+ fib_info_hold(fi);
+ fib_cache->fi = fi;
+ fib_cache->kern_tos = tos;
+ fib_cache->kern_type = type;
+
+ err = rhashtable_insert_fast(&sw->router->kern_fib_cache_ht,
+ &fib_cache->ht_node,
+ __prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_cache;
+
+err_ht_insert:
+ fib_info_put(fi);
+ kfree(fib_cache);
+err_kzalloc:
+ return NULL;
+}
+
+static void
+__prestera_k_arb_fib_lpm_offload_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool fail, bool offload, bool trap)
+{
+ struct fib_rt_info fri;
+
+ if (fc->key.addr.v != PRESTERA_IPV4)
+ return;
+
+ fri.fi = fc->fi;
+ fri.tb_id = fc->key.kern_tb_id;
+ fri.dst = fc->key.addr.u.ipv4;
+ fri.dst_len = fc->key.prefix_len;
+ fri.tos = fc->kern_tos;
+ fri.type = fc->kern_type;
+ /* flags begin */
+ fri.offload = offload;
+ fri.trap = trap;
+ fri.offload_failed = fail;
+ /* flags end */
+ fib_alias_hw_flags_set(&init_net, &fri);
+}
+
+static int
+__prestera_pr_k_arb_fc_lpm_info_calc(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ memset(&fc->lpm_info, 0, sizeof(fc->lpm_info));
+
+ switch (fc->fi->fib_type) {
+ case RTN_UNICAST:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ /* Unsupported. Leave it for kernel: */
+ case RTN_BROADCAST:
+ case RTN_MULTICAST:
+ /* Routes we must trap by design: */
+ case RTN_LOCAL:
+ case RTN_UNREACHABLE:
+ case RTN_PROHIBIT:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_TRAP;
+ break;
+ case RTN_BLACKHOLE:
+ fc->lpm_info.fib_type = PRESTERA_FIB_TYPE_DROP;
+ break;
+ default:
+ dev_err(sw->dev->dev, "Unsupported fib_type");
+ return -EOPNOTSUPP;
+ }
+
+ fc->lpm_info.fib_key.addr = fc->key.addr;
+ fc->lpm_info.fib_key.prefix_len = fc->key.prefix_len;
+ fc->lpm_info.fib_key.tb_id = prestera_fix_tb_id(fc->key.kern_tb_id);
+
+ return 0;
+}
+
+static int __prestera_k_arb_f_lpm_set(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc,
+ bool enabled)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = prestera_fib_node_find(sw, &fc->lpm_info.fib_key);
+ if (fib_node)
+ prestera_fib_node_destroy(sw, fib_node);
+
+ if (!enabled)
+ return 0;
+
+ fib_node = prestera_fib_node_create(sw, &fc->lpm_info.fib_key,
+ fc->lpm_info.fib_type);
+
+ if (!fib_node) {
+ dev_err(sw->dev->dev, "fib_node=NULL %pI4n/%d kern_tb_id = %d",
+ &fc->key.addr.u.ipv4, fc->key.prefix_len,
+ fc->key.kern_tb_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int __prestera_k_arb_fc_apply(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ int err;
+
+ err = __prestera_pr_k_arb_fc_lpm_info_calc(sw, fc);
+ if (err)
+ return err;
+
+ err = __prestera_k_arb_f_lpm_set(sw, fc, fc->reachable);
+ if (err) {
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc,
+ true, false, false);
+ return err;
+ }
+
+ switch (fc->lpm_info.fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false,
+ false, fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ __prestera_k_arb_fib_lpm_offload_set(sw, fc, false, true,
+ fc->reachable);
+ break;
+ case PRESTERA_FIB_TYPE_INVALID:
+ break;
+ }
+
+ return 0;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlaps(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_LOCAL) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_MAIN;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static struct prestera_kern_fib_cache *
+__prestera_k_arb_util_fib_overlapped(struct prestera_switch *sw,
+ struct prestera_kern_fib_cache *fc)
+{
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *rfc;
+
+ /* TODO: parse kernel rules */
+ rfc = NULL;
+ if (fc->key.kern_tb_id == RT_TABLE_MAIN) {
+ memcpy(&fc_key, &fc->key, sizeof(fc_key));
+ fc_key.kern_tb_id = RT_TABLE_LOCAL;
+ rfc = prestera_kern_fib_cache_find(sw, &fc_key);
+ }
+
+ return rfc;
+}
+
+static int
+prestera_k_arb_fib_evt(struct prestera_switch *sw,
+ bool replace, /* replace or del */
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct prestera_kern_fib_cache *tfib_cache, *bfib_cache; /* top/btm */
+ struct prestera_kern_fib_cache_key fc_key;
+ struct prestera_kern_fib_cache *fib_cache;
+ int err;
+
+ prestera_util_fen_info2fib_cache_key(fen_info, &fc_key);
+ fib_cache = prestera_kern_fib_cache_find(sw, &fc_key);
+ if (fib_cache) {
+ fib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying destroyed fib_cache failed");
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache && bfib_cache) {
+ bfib_cache->reachable = true;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ prestera_kern_fib_cache_destroy(sw, fib_cache);
+ }
+
+ if (replace) {
+ fib_cache = prestera_kern_fib_cache_create(sw, &fc_key,
+ fen_info->fi,
+ fen_info->tos,
+ fen_info->type);
+ if (!fib_cache) {
+ dev_err(sw->dev->dev, "fib_cache == NULL");
+ return -ENOENT;
+ }
+
+ bfib_cache = __prestera_k_arb_util_fib_overlaps(sw, fib_cache);
+ tfib_cache = __prestera_k_arb_util_fib_overlapped(sw, fib_cache);
+ if (!tfib_cache)
+ fib_cache->reachable = true;
+
+ if (bfib_cache) {
+ bfib_cache->reachable = false;
+ err = __prestera_k_arb_fc_apply(sw, bfib_cache);
+ if (err)
+ dev_err(sw->dev->dev,
+ "Applying fib_cache btm failed");
+ }
+
+ err = __prestera_k_arb_fc_apply(sw, fib_cache);
+ if (err)
+ dev_err(sw->dev->dev, "Applying fib_cache failed");
+ }
+
+ return 0;
+}
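A concrete walk-through (addresses invented): when a /32 for a local
interface address exists in both RT_TABLE_LOCAL and RT_TABLE_MAIN, the
LOCAL entry overlaps the MAIN one, so only the LOCAL cache entry is marked
reachable and installed to hardware; once the LOCAL entry is deleted, the
MAIN entry becomes the top route and is re-applied as reachable.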
+
static int __prestera_inetaddr_port_event(struct net_device *port_dev,
unsigned long event,
struct netlink_ext_ack *extack)
return notifier_from_errno(err);
}
+struct prestera_fib_event_work {
+ struct work_struct work;
+ struct prestera_switch *sw;
+ struct fib_entry_notifier_info fen_info;
+ unsigned long event;
+};
+
+static void __prestera_router_fib_event_work(struct work_struct *work)
+{
+ struct prestera_fib_event_work *fib_work =
+ container_of(work, struct prestera_fib_event_work, work);
+ struct prestera_switch *sw = fib_work->sw;
+ int err;
+
+ rtnl_lock();
+
+ switch (fib_work->event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = prestera_k_arb_fib_evt(sw, true, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ err = prestera_k_arb_fib_evt(sw, false, &fib_work->fen_info);
+ if (err)
+ goto err_out;
+
+ break;
+ }
+
+ goto out;
+
+err_out:
+ dev_err(sw->dev->dev, "Error when processing %pI4h/%d",
+ &fib_work->fen_info.dst,
+ fib_work->fen_info.dst_len);
+out:
+ fib_info_put(fib_work->fen_info.fi);
+ rtnl_unlock();
+ kfree(fib_work);
+}
+
+/* Called with rcu_read_lock() */
+static int __prestera_router_fib_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct prestera_fib_event_work *fib_work;
+ struct fib_entry_notifier_info *fen_info;
+ struct fib_notifier_info *info = ptr;
+ struct prestera_router *router;
+
+ if (info->family != AF_INET)
+ return NOTIFY_DONE;
+
+ router = container_of(nb, struct prestera_router, fib_nb);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ fen_info = container_of(info, struct fib_entry_notifier_info,
+ info);
+ if (!fen_info->fi)
+ return NOTIFY_DONE;
+
+ fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
+ if (WARN_ON(!fib_work))
+ return NOTIFY_BAD;
+
+ fib_info_hold(fen_info->fi);
+ fib_work->fen_info = *fen_info;
+ fib_work->event = event;
+ fib_work->sw = router->sw;
+ INIT_WORK(&fib_work->work, __prestera_router_fib_event_work);
+ prestera_queue_work(&fib_work->work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
int prestera_router_init(struct prestera_switch *sw)
{
struct prestera_router *router;
if (err)
goto err_router_lib_init;
+ err = rhashtable_init(&router->kern_fib_cache_ht,
+ &__prestera_kern_fib_cache_ht_params);
+ if (err)
+ goto err_kern_fib_cache_ht_init;
+
router->inetaddr_valid_nb.notifier_call = __prestera_inetaddr_valid_cb;
err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
if (err)
if (err)
goto err_register_inetaddr_notifier;
+ router->fib_nb.notifier_call = __prestera_router_fib_event;
+ err = register_fib_notifier(&init_net, &router->fib_nb,
+ /* TODO: flush fib entries */ NULL, NULL);
+ if (err)
+ goto err_register_fib_notifier;
+
return 0;
+err_register_fib_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
+ rhashtable_destroy(&router->kern_fib_cache_ht);
+err_kern_fib_cache_ht_init:
prestera_router_hw_fini(sw);
err_router_lib_init:
kfree(sw->router);
{
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
+ rhashtable_destroy(&sw->router->kern_fib_cache_ht);
prestera_router_hw_fini(sw);
kfree(sw->router);
sw->router = NULL;
#include "prestera_acl.h"
/* +--+
- * +------->|vr|
- * | +--+
- * |
- * +-+-------+
- * |rif_entry|
- * +---------+
- * Rif is
+ * +------->|vr|<-+
+ * | +--+ |
+ * | |
+ * +-+-------+ +--+---+-+
+ * |rif_entry| |fib_node|
+ * +---------+ +--------+
+ *  Rif is         Fib is the exit point
* used as
* entry point
* for vr in hw
*/
+#define PRESTERA_NHGR_UNUSED (0)
+#define PRESTERA_NHGR_DROP (0xFFFFFFFF)
+
+static const struct rhashtable_params __prestera_fib_ht_params = {
+ .key_offset = offsetof(struct prestera_fib_node, key),
+ .head_offset = offsetof(struct prestera_fib_node, ht_node),
+ .key_len = sizeof(struct prestera_fib_key),
+ .automatic_shrinking = true,
+};
+
int prestera_router_hw_init(struct prestera_switch *sw)
{
+ int err;
+
+ err = rhashtable_init(&sw->router->fib_ht,
+ &__prestera_fib_ht_params);
+ if (err)
+		return err;
+
INIT_LIST_HEAD(&sw->router->vr_list);
INIT_LIST_HEAD(&sw->router->rif_entry_list);
return 0;
}
{
WARN_ON(!list_empty(&sw->router->vr_list));
WARN_ON(!list_empty(&sw->router->rif_entry_list));
+ rhashtable_destroy(&sw->router->fib_ht);
}
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
err_kzalloc:
return NULL;
}
+
+struct prestera_fib_node *
+prestera_fib_node_find(struct prestera_switch *sw, struct prestera_fib_key *key)
+{
+ struct prestera_fib_node *fib_node;
+
+ fib_node = rhashtable_lookup_fast(&sw->router->fib_ht, key,
+ __prestera_fib_ht_params);
+ return IS_ERR(fib_node) ? NULL : fib_node;
+}
+
+static void __prestera_fib_node_destruct(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ struct prestera_vr *vr;
+
+ vr = fib_node->info.vr;
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, fib_node->key.addr.u.ipv4,
+ fib_node->key.prefix_len);
+ switch (fib_node->info.type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ break;
+ default:
+ pr_err("Unknown fib_node->info.type = %d",
+ fib_node->info.type);
+ }
+
+ prestera_vr_put(sw, vr);
+}
+
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node)
+{
+ __prestera_fib_node_destruct(sw, fib_node);
+ rhashtable_remove_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ kfree(fib_node);
+}
+
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type)
+{
+ struct prestera_fib_node *fib_node;
+ u32 grp_id;
+ struct prestera_vr *vr;
+ int err;
+
+ fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
+ if (!fib_node)
+ goto err_kzalloc;
+
+ memcpy(&fib_node->key, key, sizeof(*key));
+ fib_node->info.type = fib_type;
+
+ vr = prestera_vr_get(sw, key->tb_id, NULL);
+ if (IS_ERR(vr))
+ goto err_vr_get;
+
+ fib_node->info.vr = vr;
+
+ switch (fib_type) {
+ case PRESTERA_FIB_TYPE_TRAP:
+ grp_id = PRESTERA_NHGR_UNUSED;
+ break;
+ case PRESTERA_FIB_TYPE_DROP:
+ grp_id = PRESTERA_NHGR_DROP;
+ break;
+ default:
+ pr_err("Unsupported fib_type %d", fib_type);
+ goto err_nh_grp_get;
+ }
+
+ err = prestera_hw_lpm_add(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len, grp_id);
+ if (err)
+ goto err_lpm_add;
+
+ err = rhashtable_insert_fast(&sw->router->fib_ht, &fib_node->ht_node,
+ __prestera_fib_ht_params);
+ if (err)
+ goto err_ht_insert;
+
+ return fib_node;
+
+err_ht_insert:
+ prestera_hw_lpm_del(sw, vr->hw_vr_id, key->addr.u.ipv4,
+ key->prefix_len);
+err_lpm_add:
+err_nh_grp_get:
+ prestera_vr_put(sw, vr);
+err_vr_get:
+ kfree(fib_node);
+err_kzalloc:
+ return NULL;
+}
struct list_head router_node; /* ht */
};
+struct prestera_ip_addr {
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } u;
+ enum {
+ PRESTERA_IPV4 = 0,
+ PRESTERA_IPV6
+ } v;
+};
+
+struct prestera_fib_key {
+ struct prestera_ip_addr addr;
+ u32 prefix_len;
+ u32 tb_id;
+};
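As a minimal sketch (addresses invented), a key for 192.168.0.0/16 in the
main table would be filled in as:

	struct prestera_fib_key key = {
		.addr = {
			.v = PRESTERA_IPV4,
			.u.ipv4 = htonl(0xc0a80000), /* 192.168.0.0 */
		},
		.prefix_len = 16,
		.tb_id = RT_TABLE_MAIN,
	};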
+
+struct prestera_fib_info {
+ struct prestera_vr *vr;
+ struct list_head vr_node;
+ enum prestera_fib_type {
+ PRESTERA_FIB_TYPE_INVALID = 0,
+		/* May be a connected route and
+		 * can be overlapped by neighbour entries
+ */
+ PRESTERA_FIB_TYPE_TRAP,
+ PRESTERA_FIB_TYPE_DROP
+ } type;
+};
+
+struct prestera_fib_node {
+	struct rhash_head ht_node; /* node of prestera_router->fib_ht */
+ struct prestera_fib_key key;
+ struct prestera_fib_info info; /* action related info */
+};
+
struct prestera_rif_entry *
prestera_rif_entry_find(const struct prestera_switch *sw,
const struct prestera_rif_entry_key *k);
prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
+struct prestera_fib_node *prestera_fib_node_find(struct prestera_switch *sw,
+ struct prestera_fib_key *key);
+void prestera_fib_node_destroy(struct prestera_switch *sw,
+ struct prestera_fib_node *fib_node);
+struct prestera_fib_node *
+prestera_fib_node_create(struct prestera_switch *sw,
+ struct prestera_fib_key *key,
+ enum prestera_fib_type fib_type);
int prestera_router_hw_init(struct prestera_switch *sw);
void prestera_router_hw_fini(struct prestera_switch *sw);
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
- en/qos.o en/trap.o en/fs_tt_redirect.o
+ en/qos.o en/trap.o en/fs_tt_redirect.o en/selq.o
#
# Netdev extra
#include "lib/hv_vhca.h"
#include "lib/clock.h"
#include "en/rx_res.h"
+#include "en/selq.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(mlx5e_get_sw_max_sq_mpw_wqebbs(mlx5e_get_max_sq_wqebbs(mdev)) \
+ << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
+/* The maximum WQE size can be retrieved from max_wqe_sz_sq in
+ * units of bytes. The driver hardens the limit to 1KB (16
+ * WQEBBs), unless firmware capability is stricter.
+ */
+static inline u16 mlx5e_get_max_sq_wqebbs(struct mlx5_core_dev *mdev)
+{
+ return min_t(u16, MLX5_SEND_WQE_MAX_WQEBBS,
+ MLX5_CAP_GEN(mdev, max_wqe_sz_sq) / MLX5_SEND_WQE_BB);
+}
+
+static inline u16 mlx5e_get_sw_max_sq_mpw_wqebbs(u16 max_sq_wqebbs)
+{
+/* The return value will be multiplied by MLX5_SEND_WQEBB_NUM_DS.
+ * Since max_sq_wqebbs may be up to MLX5_SEND_WQE_MAX_WQEBBS == 16,
+ * see mlx5e_get_max_sq_wqebbs(), the multiplication (16 * 4 == 64)
+ * overflows the 6-bit DS field of Ctrl Segment. Use a bound lower
+ * than MLX5_SEND_WQE_MAX_WQEBBS to let a full-session WQE be
+ * cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 1);
+#else
+ return min_t(u16, max_sq_wqebbs, MLX5_SEND_WQE_MAX_WQEBBS - 2);
+#endif
+}
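The cap above is easy to sanity-check with the values the comment assumes
(MLX5_SEND_WQE_MAX_WQEBBS == 16 and MLX5_SEND_WQEBB_NUM_DS == 4, restated
here as plain constants only for illustration):

	_Static_assert(16 * 4 > 63,
		       "a full 16-WQEBB MPWQE overflows the 6-bit DS count");
	_Static_assert(15 * 4 <= 63,
		       "15 WQEBBs (or 14 for 128-byte cachelines) fit");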
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
struct netdev_queue *txq;
u32 sqn;
u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
struct device *pdev;
__be32 mkey_be;
unsigned long state;
unsigned int hw_mtu;
- struct hwtstamp_config *tstamp;
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
u32 sqn;
struct device *pdev;
__be32 mkey_be;
+ u16 stop_room;
+ u16 max_sq_mpw_wqebbs;
u8 min_inline_mode;
unsigned long state;
unsigned int hw_mtu;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
} ____cacheline_aligned_in_smp;
struct mlx5e_ktls_resync_resp;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
struct mlx5e_channel *channel;
+ u16 max_sq_wqebbs;
struct work_struct recover_work;
} ____cacheline_aligned_in_smp;
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ u8 min_wqe_bulk;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
struct mlx5e_priv {
/* priv data path fields - start */
+ struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
- int **channel_tc2realtxq;
- int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
- int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
+{
+#define UMR_WQE_BULK (2)
+ return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
+}
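For example, a striding RQ with wq_sz == 8 bulks min(2, 8/2 - 1) == 2 UMR
WQEs per refill, while a minimal ring of wq_sz == 4 degrades to
min(2, 4/2 - 1) == 1, so refill can still make progress.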
+
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
u16 stop_room;
stop_room = mlx5e_tls_get_stop_room(mdev, params);
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
if (is_mpwqe)
/* A MPWQE can take up to the maximum-sized WQE + all the normal
* stop room can be taken if a new packet breaks the active
* MPWQE session and allocates its WQEs right away.
*/
- stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ stop_room += mlx5e_stop_room_for_max_wqe(mdev);
return stop_room;
}
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
- * - Slow PCI heuristic.
+ * - CQE compression is ON, and stride_index mini_cqe layout is not supported.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*
* No XSK params: checking the availability of striding RQ in general.
*/
- if (!slow_pci_heuristic(mdev) &&
+ if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
+ MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
!mlx5e_rx_is_linear_skb(params, NULL)))
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
mlx5e_build_sq_param_common(mdev, param);
- param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
+ param->stop_room = mlx5e_stop_room_for_wqe(mdev, 1); /* for XSK NOP */
param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
- param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
+ param->stop_room += mlx5e_stop_room_for_wqe(mdev, 1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
mlx5e_build_ico_cq_param(mdev, log_wq_size, &param->cqp);
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
int node;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ param->stop_room = mlx5e_stop_room_for_max_wqe(mdev);
mlx5e_build_tx_cq_param(mdev, params, &param->cqp);
}
struct mlx5e_qos_node {
struct hlist_node hnode;
- struct rcu_head rcu;
struct mlx5e_qos_node *parent;
u64 rate;
u32 bw_share;
__clear_bit(node->qid, priv->htb.qos_used_qids);
mlx5e_update_tx_netdev_queues(priv);
}
- kfree_rcu(node, rcu);
+ /* Make sure this qid is no longer selected by mlx5e_select_queue, so
+ * that mlx5e_reactivate_qos_sq can safely restart the netdev TX queue.
+ */
+ synchronize_net();
+ kfree(node);
}
/* TX datapath API */
static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
{
struct mlx5e_txqsq *sq;
+ u16 qid;
sq = mlx5e_get_qos_sq(priv, node->qid);
- WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+ qid = mlx5e_qid_from_qos(&priv->channels, node->qid);
+
+ /* If it's a new queue, it will be marked as started at this point.
+ * Stop it before updating txq2sq.
+ */
+ mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
+
+ priv->txq2sq[qid] = sq;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- /* The queue is disabled, no synchronization with datapath is needed. */
priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+
+ /* Make the change to txq2sq visible before the queue is started again.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (opened) {
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, true);
+
err = mlx5e_qos_alloc_queues(priv, &priv->channels);
if (err)
- return err;
+ goto err_cancel_selq;
}
root = mlx5e_sw_node_create_root(priv);
*/
smp_store_release(&priv->htb.maj_id, htb_maj_id);
+ if (opened)
+ mlx5e_selq_apply(&priv->selq);
+
return 0;
err_sw_node_delete:
err_free_queues:
if (opened)
mlx5e_qos_close_all_queues(&priv->channels);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+ /* Wait until real_num_tx_queues is updated for mlx5e_select_queue,
+ * so that we can safely switch to its non-HTB non-PTP fastpath.
+ */
+ synchronize_net();
+
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, false);
+ mlx5e_selq_apply(&priv->selq);
+
WRITE_ONCE(priv->htb.maj_id, 0);
- synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
if (!root) {
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
- err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
+ err = mlx5e_tc_esw_init(uplink_priv);
return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
/* delete shared tc flow table */
- mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
+ mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
- mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
+ mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "selq.h"
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/rcupdate.h>
+#include "en.h"
+#include "en/ptp.h"
+
+struct mlx5e_selq_params {
+ unsigned int num_regular_queues;
+ unsigned int num_channels;
+ unsigned int num_tcs;
+ union {
+ u8 is_special_queues;
+ struct {
+ bool is_htb : 1;
+ bool is_ptp : 1;
+ };
+ };
+};
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
+{
+ struct mlx5e_selq_params *init_params;
+
+ selq->state_lock = state_lock;
+
+ selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
+ if (!selq->standby)
+ return -ENOMEM;
+
+ init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
+ if (!init_params) {
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ return -ENOMEM;
+ }
+ /* Assign dummy values, so that mlx5e_select_queue won't crash. */
+ *init_params = (struct mlx5e_selq_params) {
+ .num_regular_queues = 1,
+ .num_channels = 1,
+ .num_tcs = 1,
+ .is_htb = false,
+ .is_ptp = false,
+ };
+ rcu_assign_pointer(selq->active, init_params);
+
+ return 0;
+}
+
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
+{
+ WARN_ON_ONCE(selq->is_prepared);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+ selq->is_prepared = true;
+
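+	/* Marking the (already freed) standby as "prepared" lets the
+	 * mlx5e_selq_apply() below swap the active pointer into standby,
+	 * so the active params can be freed as well.
+	 */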
+ mlx5e_selq_apply(selq);
+
+ kvfree(selq->standby);
+ selq->standby = NULL;
+}
+
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(selq->is_prepared);
+
+ selq->is_prepared = true;
+
+ selq->standby->num_channels = params->num_channels;
+ selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
+ selq->standby->num_regular_queues =
+ selq->standby->num_channels * selq->standby->num_tcs;
+ selq->standby->is_htb = htb;
+ selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
+}
+
+void mlx5e_selq_apply(struct mlx5e_selq *selq)
+{
+ struct mlx5e_selq_params *old_params;
+
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+
+ old_params = rcu_replace_pointer(selq->active, selq->standby,
+ lockdep_is_held(selq->state_lock));
+ synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
+ selq->standby = old_params;
+}
+
+void mlx5e_selq_cancel(struct mlx5e_selq *selq)
+{
+ lockdep_assert_held(selq->state_lock);
+ WARN_ON_ONCE(!selq->is_prepared);
+
+ selq->is_prepared = false;
+}
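The three calls above form a double-buffered update. A usage sketch
(new_params and reconfigure() are hypothetical stand-ins for the real
channel-reconfiguration step, with priv->state_lock held):

	int err;

	mlx5e_selq_prepare(&priv->selq, &new_params, false);
	err = reconfigure(priv);
	if (err)
		mlx5e_selq_cancel(&priv->selq);	/* standby params are dropped */
	else
		mlx5e_selq_apply(&priv->selq);	/* RCU-swap standby and active */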
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
+static int mlx5e_get_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (READ_ONCE(priv->dcbx_dp.trust_state) == MLX5_QPTS_TRUST_DSCP)
+ return mlx5e_get_dscp_up(priv, skb);
+#endif
+ if (skb_vlan_tag_present(skb))
+ return skb_vlan_tag_get_prio(skb);
+ return 0;
+}
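For example, an IPv4 packet with TOS 0xb8 (DSCP EF) yields
dscp_cp = 0xb8 >> 2 = 46, which indexes dscp2prio; when the trust state is
not DSCP, the VLAN PCP is used instead, falling back to 0.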
+
+static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
+ struct mlx5e_selq_params *selq)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int up;
+
+ up = selq->num_tcs > 1 ? mlx5e_get_up(priv, skb) : 0;
+
+ return selq->num_regular_queues + up;
+}
+
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ u16 classid;
+
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_selq_params *selq;
+ int txq_ix, up;
+
+ selq = rcu_dereference_bh(priv->selq.active);
+
+ /* This is a workaround needed only for the mlx5e_netdev_change_profile
+ * flow that zeroes out the whole priv without unregistering the netdev
+ * and without preventing ndo_select_queue from being called.
+ */
+ if (unlikely(!selq))
+ return 0;
+
+ if (likely(!selq->is_special_queues)) {
+ /* No special queues, netdev_pick_tx returns one of the regular ones. */
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ /* Normalize any picked txq_ix to [0, num_channels),
+		 * so we can return a txq_ix that matches the channel and
+ * packet UP.
+ */
+ return mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels) +
+ up * selq->num_channels;
+ }
+
+ if (unlikely(selq->is_htb)) {
+		/* HTB is on, so num_tcs == 1 and the PTP txq (if any)
+		 * sits right after the regular queues.
+		 */
+
+ txq_ix = mlx5e_select_htb_queue(priv, skb);
+ if (txq_ix > 0)
+ return txq_ix;
+
+ if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
+ return selq->num_channels;
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+		/* Keep netdev_pick_tx() from choosing the ptp_channel and HTB
+		 * txqs; if one of them was picked, fall back to a regular
+		 * queue. The driver selects these queues only in
+		 * mlx5e_select_ptpsq() and mlx5e_select_htb_queue().
+ */
+ return mlx5e_txq_to_ch_ix_htb(txq_ix, selq->num_channels);
+ }
+
+ /* PTP is enabled */
+
+ if (mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb, selq);
+
+ txq_ix = netdev_pick_tx(dev, skb, NULL);
+
+ /* Normalize any picked txq_ix to [0, num_channels). Queues in range
+ * [0, num_regular_queues) will be mapped to the corresponding channel
+ * index, so that we can apply the packet's UP (if num_tcs > 1).
+ * If netdev_pick_tx() picks ptp_channel, switch to a regular queue,
+	 * because the driver selects the PTP queue only in mlx5e_select_ptpsq().
+ */
+ txq_ix = mlx5e_txq_to_ch_ix(txq_ix, selq->num_channels);
+
+ if (selq->num_tcs <= 1)
+ return txq_ix;
+
+ up = mlx5e_get_up(priv, skb);
+
+ return txq_ix + up * selq->num_channels;
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_SELQ_H__
+#define __MLX5_EN_SELQ_H__
+
+#include <linux/kernel.h>
+
+struct mlx5e_selq_params;
+
+struct mlx5e_selq {
+ struct mlx5e_selq_params __rcu *active;
+ struct mlx5e_selq_params *standby;
+ struct mutex *state_lock; /* points to priv->state_lock */
+ bool is_prepared;
+};
+
+struct mlx5e_params;
+struct net_device;
+struct sk_buff;
+
+int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock);
+void mlx5e_selq_cleanup(struct mlx5e_selq *selq);
+void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb);
+void mlx5e_selq_apply(struct mlx5e_selq *selq);
+void mlx5e_selq_cancel(struct mlx5e_selq *selq);
+
+static inline u16 mlx5e_txq_to_ch_ix(u16 txq, u16 num_channels)
+{
+ while (unlikely(txq >= num_channels))
+ txq -= num_channels;
+ return txq;
+}
+
+static inline u16 mlx5e_txq_to_ch_ix_htb(u16 txq, u16 num_channels)
+{
+ if (unlikely(txq >= num_channels)) {
+ if (unlikely(txq >= num_channels << 3))
+ txq %= num_channels;
+ else
+ do
+ txq -= num_channels;
+ while (txq >= num_channels);
+ }
+ return txq;
+}
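A worked example (numbers invented): with num_channels == 4 and
num_tcs == 2, the regular txqs are 0..7. If netdev_pick_tx() returns 6,
mlx5e_txq_to_ch_ix(6, 4) normalizes it to channel 2, and a packet with
UP 0 then goes to txq 2 rather than 6, so the packet's UP, not the stack's
pick, selects the TC ring. In the HTB variant, mlx5e_txq_to_ch_ix_htb(9, 4)
likewise loops down to channel 1.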
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+
+#endif /* __MLX5_EN_SELQ_H__ */
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_ACCEPT;
return 0;
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
+#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}
+
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder)
+{
+ struct flow_action_entry *act;
+ int i, j = 0;
+
+ flow_action_for_each(i, act, flow_action) {
+		/* Place CT actions first. */
+ if (act->id == FLOW_ACTION_CT)
+ flow_action_reorder->entries[j++] = act;
+ }
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT)
+ continue;
+ flow_action_reorder->entries[j++] = act;
+ }
+}
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct flow_action_entry *act;
+ struct mlx5e_tc_act *tc_act;
+ struct mlx5e_priv *priv;
+ int err = 0, i;
+
+ priv = parse_state->flow->priv;
+
+ flow_action_for_each(i, act, flow_action) {
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act || !tc_act->post_parse ||
+ !tc_act->can_offload(parse_state, act, i, attr))
+ continue;
+
+ err = tc_act->post_parse(parse_state, priv, attr);
+ if (err)
+ goto out;
+ }
+
+out:
+ return err;
+}
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr)
+{
+ struct mlx5_core_dev *mdev = flow->priv->mdev;
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
+ int err;
+
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+
+	/* Point the current post-act rule at the next one via its post-act handle. */
+ err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
+ if (err) {
+ mlx5_core_warn(mdev, "Failed setting post action handle");
+ return err;
+ }
+
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+}
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
+ u32 actions;
bool ct;
bool encap;
bool decap;
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
+
+ bool (*is_multi_table_act)(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr);
+};
+
+struct mlx5e_tc_flow_action {
+ unsigned int num_entries;
+ struct flow_action_entry **entries;
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
+void
+mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
+ struct mlx5e_tc_flow_action *flow_action_reorder);
+
+int
+mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action,
+ struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type);
+
+int
+mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_attr *next_attr);
+
#endif /* __MLX5_EN_TC_ACT_H__ */
bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
- if (flow_flag_test(parse_state->flow, SAMPLE)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
- return false;
- }
-
if (parse_state->ct && !clear_action) {
- NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supoported");
+ NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
return 0;
}
+static bool
+tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ if (act->ct.action & TCA_CT_ACT_CLEAR)
+ return false;
+
+ return true;
+}
+
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
+ .is_multi_table_act = tc_act_is_multi_table_act_ct,
};
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
return 0;
}
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_chain = act->chain_index;
return 0;
if (err)
return err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
struct net_device *out_dev = act->dev;
int err;
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
+#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
+ bool ct_nat;
- if (flow_flag_test(parse_state->flow, CT)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Sample action with connection tracking is not supported");
+ ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+
+ if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
+ NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
return 0;
}
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr)
+{
+ if (MLX5_CAP_GEN(mdev, reg_c_preserve) ||
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return true;
+
+ return false;
+}
+
+static bool
+tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
+ const struct flow_action_entry *act,
+ struct mlx5_flow_attr *attr)
+{
+ return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr);
+}
+
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
+ .is_multi_table_act = tc_act_is_multi_table_act_sample,
};
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__
+#define __MLX5_EN_TC_ACT_SAMPLE_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+bool
+mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_attr *attr);
+
+#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
- MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
return 0;
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
kfree(post_act);
}
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Post action rule matches on fte_id and executes original rule's tc rule action */
+ mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
+
+ handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
+ if (IS_ERR(handle->rule)) {
+ err = PTR_ERR(handle->rule);
+ netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
+ goto err_rule;
+ }
+
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
{
u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
- struct mlx5e_post_act_handle *handle = NULL;
- struct mlx5_flow_attr *post_attr = NULL;
- struct mlx5_flow_spec *spec = NULL;
+ struct mlx5e_post_act_handle *handle;
+ struct mlx5_flow_attr *post_attr;
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
- if (!handle || !spec || !post_attr) {
+ if (!handle || !post_attr) {
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(-ENOMEM);
}
post_attr->ft = post_act->ft;
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
- post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
- post_attr->flags &= ~MLX5_ATTR_FLAG_SAMPLE;
+ post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
if (err)
goto err_xarray;
- /* Post action rule matches on fte_id and executes original rule's
- * tc rule action
- */
- mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
- handle->id, MLX5_POST_ACTION_MASK);
-
- handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
- if (IS_ERR(handle->rule)) {
- err = PTR_ERR(handle->rule);
- netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
- goto err_rule;
- }
handle->attr = post_attr;
- kvfree(spec);
return handle;
-err_rule:
- xa_erase(&post_act->ids, handle->id);
err_xarray:
kfree(post_attr);
- kvfree(spec);
kfree(handle);
return ERR_PTR(err);
}
+void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle)
+{
+ mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
+ handle->rule = NULL;
+}
+
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
- mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
+ if (!IS_ERR_OR_NULL(handle->rule))
+ mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
kfree(handle->attr);
kfree(handle);
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
+int
+mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
+void
+mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
+ struct mlx5e_post_act_handle *handle);
+
struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
+#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
struct mlx5_flow_handle *pre_rule;
struct mlx5_flow_attr *post_attr;
struct mlx5_flow_handle *post_rule;
- struct mlx5e_post_act_handle *post_act_handle;
};
struct mlx5e_sample_restore {
struct hlist_node hlist;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *rule;
- struct mlx5e_post_act_handle *post_act_handle;
u32 obj_id;
int count;
};
*/
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
- struct mlx5e_post_act_handle *handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5_modify_hdr *modify_hdr;
int err;
- err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
CHAIN_TO_REG, obj_id);
if (err)
goto err_set_regc0;
- if (handle) {
- err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
- if (err)
- goto err_post_act;
- }
-
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_acts.num_actions,
- mod_acts.actions);
+ mod_acts->num_actions,
+ mod_acts->actions);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
}
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
return modify_hdr;
err_modify_hdr:
-err_post_act:
- mlx5e_mod_hdr_dealloc(&mod_acts);
+ mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
-static u32
-restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
-{
- return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
-}
-
-static bool
-restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
-{
- return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
-}
-
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
- struct mlx5e_post_act_handle *post_act_handle)
+ struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5_eswitch *esw = tc_psample->esw;
struct mlx5_core_dev *mdev = esw->dev;
struct mlx5e_sample_restore *restore;
struct mlx5_modify_hdr *modify_hdr;
- u32 hash_key;
int err;
mutex_lock(&tc_psample->restore_lock);
- hash_key = restore_hash(obj_id, post_act_handle);
- hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
- if (restore_equal(restore, obj_id, post_act_handle))
+ hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
+ if (restore->obj_id == obj_id)
goto add_ref;
restore = kzalloc(sizeof(*restore), GFP_KERNEL);
goto err_alloc;
}
restore->obj_id = obj_id;
- restore->post_act_handle = post_act_handle;
- modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
+ modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
goto err_restore;
}
- hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
+ hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
restore->count++;
mutex_unlock(&tc_psample->restore_lock);
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5e_post_act_handle *post_act_handle = NULL;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_esw_flow_attr *pre_esw_attr;
struct mlx5_mapped_obj restore_obj = {};
+ struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5e_sample_flow *sample_flow;
struct mlx5e_sample_attr *sample_attr;
struct mlx5_flow_attr *pre_attr;
* original flow table.
*/
esw = tc_psample->esw;
- if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
- attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+ if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
struct mlx5_flow_table *ft;
ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
default_tbl_id = ft->id;
- post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
- if (IS_ERR(post_act_handle)) {
- err = PTR_ERR(post_act_handle);
- goto err_post_act;
- }
- sample_flow->post_act_handle = post_act_handle;
} else {
err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
if (err)
sample_attr->restore_obj_id = obj_id;
/* Create sample restore context. */
- sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
+ mod_acts = &attr->parse_attr->mod_hdr_acts;
+ sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
if (IS_ERR(sample_flow->restore)) {
err = PTR_ERR(sample_flow->restore);
goto err_sample_restore;
pre_attr->outer_match_level = attr->outer_match_level;
pre_attr->chain = attr->chain;
pre_attr->prio = attr->prio;
+ pre_attr->ft = attr->ft;
pre_attr->sample_attr = *sample_attr;
pre_esw_attr = pre_attr->esw_attr;
pre_esw_attr->in_mdev = esw_attr->in_mdev;
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
- if (post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
-err_post_act:
kfree(sample_flow);
return ERR_PTR(err);
}
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle)
- mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- else
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
+#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
- struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
u32 chain_mapping;
};
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + ft prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
* v
* +---------------------+
* + pre_ct/pre_ct_nat + if matches +-------------------------+
- * + zone+nat match +---------------->+ post_act (see below) +
+ * + zone+nat match +---------------->+ post_act (see below) +
* +---------------------+ set zone +-------------------------+
* | set zone
* v
* | do nat (if needed)
* v
* +--------------+
- * + post_act + original filter actions
+ * + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
- struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
}
ct_flow->ft = ft;
- handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
- if (IS_ERR(handle)) {
- err = PTR_ERR(handle);
- ct_dbg("Failed to allocate post action handle");
- goto err_post_act_handle;
- }
- ct_flow->post_act_handle = handle;
-
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
+ pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
- if (err) {
- ct_dbg("Failed to set post action handle");
- goto err_mapping;
- }
-
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
- err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
attr->tunnel_id);
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
- pre_mod_acts.num_actions,
- pre_mod_acts.actions);
+ pre_mod_acts->num_actions,
+ pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
}
attr->ct_attr.ct_flow = ct_flow;
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
- mlx5e_mod_hdr_dealloc(&pre_mod_acts);
+ mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
- mlx5e_tc_post_act_del(ct_priv->post_act, handle);
-err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- if (ct_flow->post_act_handle) {
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
- mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
- mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
- }
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
+ struct list_head attrs;
};
struct mlx5_flow_handle *
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
+
+void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
+int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
+
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Do not offload flows with unresolved neighbors */
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
continue;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ continue;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
+ spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+
+ attr = mlx5e_tc_get_encap_attr(flow);
+ esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5e_tc_unoffload_flow_post_acts(flow);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
if (attr->esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5e_detach_encap_route(priv, flow, out_index);
list_for_each_entry(flow, encap_flows, tmp_list) {
struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
if (flow_flag_test(flow, FAILED))
continue;
+ spec = &flow->attr->parse_attr->spec;
+
+ attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
- spec = &parse_attr->spec;
err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
e->out_dev, e->route_dev_ifindex,
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
goto offload_to_slow_path;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
+ err);
+ goto offload_to_slow_path;
+ }
+
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
+ mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
-
-#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
-
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
/* TX */
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
-static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
- return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+ return session->ds_count == max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
}
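
A quick sanity check of the arithmetic above, as a standalone sketch (not driver code): with the usual 4 data segments per 64-byte WQEBB, a budget of max_sq_mpw_wqebbs = 15 closes the session at 60 data segments, which still fits the 6-bit DS-count field of the control segment mentioned in the removed comment above (maximum 63).

#define SEND_WQEBB_NUM_DS 4	/* data segments per 64-byte WQEBB */

/* Mirrors mlx5e_tx_mpwqe_is_full(): the session is full once its
 * DS count reaches the per-device MPWQE budget.
 */
static inline _Bool mpwqe_is_full(unsigned int ds_count, unsigned char max_sq_mpw_wqebbs)
{
	return ds_count == (unsigned int)max_sq_mpw_wqebbs * SEND_WQEBB_NUM_DS;
}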
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
}
}
-static inline u16 mlx5e_stop_room_for_wqe(u16 wqe_size)
-{
- BUILD_BUG_ON(PAGE_SIZE / MLX5_SEND_WQE_BB < MLX5_SEND_WQE_MAX_WQEBBS);
+#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
+static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
+{
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
* 2. If the WQE size is X, and the space remaining in a page is less
* than X, this space needs to be padded with NOPs. So, one WQE of
* size X may require up to X-1 WQEBBs of padding, which makes the
* stop room of X-1 + X.
* WQE size is also limited by the hardware limit.
*/
+ WARN_ONCE(wqe_size > mlx5e_get_max_sq_wqebbs(mdev),
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, mlx5e_get_max_sq_wqebbs(mdev));
- if (__builtin_constant_p(wqe_size))
- BUILD_BUG_ON(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- else
- WARN_ON_ONCE(wqe_size > MLX5_SEND_WQE_MAX_WQEBBS);
- return wqe_size * 2 - 1;
+ return MLX5E_STOP_ROOM(wqe_size);
+}
+
+static inline u16 mlx5e_stop_room_for_max_wqe(struct mlx5_core_dev *mdev)
+{
+ return MLX5E_STOP_ROOM(mlx5e_get_max_sq_wqebbs(mdev));
}
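
To make the bound concrete: a WQE of X WQEBBs may need up to X-1 WQEBBs of NOP padding when it would otherwise cross a page boundary, so 2X-1 WQEBBs must stay reserved. A minimal standalone sketch of the same arithmetic (illustrative values only):

#include <stdio.h>

/* Same bound as MLX5E_STOP_ROOM(): X-1 WQEBBs of worst-case NOP
 * padding at a page end, plus the X-WQEBB WQE itself.
 */
static unsigned int stop_room(unsigned int wqe_size_in_wqebbs)
{
	return wqe_size_in_wqebbs * 2 - 1;
}

int main(void)
{
	printf("%u\n", stop_room(16));	/* a 16-WQEBB WQE reserves 31 WQEBBs */
	return 0;
}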
static inline bool mlx5e_icosq_can_post_wqe(struct mlx5e_icosq *sq, u16 wqe_size)
{
- u16 room = sq->reserved_room + mlx5e_stop_room_for_wqe(wqe_size);
+ u16 room = sq->reserved_room;
+
+ WARN_ONCE(wqe_size > sq->max_sq_wqebbs,
+ "wqe_size %u is greater than max SQ WQEBBs %u",
+ wqe_size, sq->max_sq_wqebbs);
+
+ room += MLX5E_STOP_ROOM(wqe_size);
return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room);
}
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_xdpsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
{
if (unlikely(!sq->mpwqe.wqe)) {
- const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-
if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
- stop_room))) {
+ sq->stop_room))) {
/* SQ is full, ring doorbell */
mlx5e_xmit_xdp_doorbell(sq);
sq->stats->full++;
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
return cur;
}
-static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
{
if (session->inline_on)
return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
- MLX5E_TX_MPW_MAX_NUM_DS;
- return mlx5e_tx_mpwqe_is_full(session);
+ max_sq_mpw_wqebbs * MLX5_SEND_WQEBB_NUM_DS;
+
+ return mlx5e_tx_mpwqe_is_full(session, max_sq_mpw_wqebbs);
}
struct mlx5e_xdp_wqe_info {
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
- stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
- stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
+ stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
+ stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
return stop_room;
}
/* FPGA */
/* Resync SKB. */
- return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ return mlx5e_stop_room_for_max_wqe(mdev);
}
err = mlx5_set_trust_state(priv->mdev, *trust_state);
if (err)
return err;
- priv->dcbx_dp.trust_state = *trust_state;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, *trust_state);
return 0;
}
static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 trust_state;
int err;
- priv->dcbx_dp.trust_state = MLX5_QPTS_TRUST_PCP;
-
- if (!MLX5_DSCP_SUPPORTED(mdev))
+ if (!MLX5_DSCP_SUPPORTED(mdev)) {
+ WRITE_ONCE(priv->dcbx_dp.trust_state, MLX5_QPTS_TRUST_PCP);
return 0;
+ }
- err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ err = mlx5_query_trust_state(priv->mdev, &trust_state);
if (err)
return err;
+ WRITE_ONCE(priv->dcbx_dp.trust_state, trust_state);
mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &priv->channels.params,
priv->dcbx_dp.trust_state);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
- bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) &&
- MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
- MLX5_CAP_ETH(mdev, reg_umr_sq);
- u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq);
- bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap;
+ bool striding_rq_umr, inline_umr;
+ u16 max_wqe_sz_cap;
+ striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+ max_wqe_sz_cap = mlx5e_get_max_sq_wqebbs(mdev) * MLX5_SEND_WQE_BB;
+ inline_umr = max_wqe_sz_cap >= MLX5E_UMR_WQE_INLINE_SZ;
if (!striding_rq_umr)
return false;
if (!inline_umr) {
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
is_redirect ?
&c->priv->channel_stats[c->ix]->xdpsq :
&c->priv->channel_stats[c->ix]->rq_xdpsq;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->stop_room = MLX5E_STOP_ROOM(sq->max_sq_wqebbs);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
sq->channel = c;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->reserved_room = param->stop_room;
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
int err;
sq->pdev = c->pdev;
- sq->tstamp = c->tstamp;
sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ sq->max_sq_wqebbs = mlx5e_get_max_sq_wqebbs(mdev);
+ sq->max_sq_mpw_wqebbs = mlx5e_get_sw_max_sq_mpw_wqebbs(sq->max_sq_wqebbs);
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
- priv->channel_tc2realtxq[i][tc] = i + tc * ch;
}
}
if (!priv->channels.ptp)
- return;
+ goto out;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
- return;
+ goto out;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
- priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
}
-}
-static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
-{
- /* Sync with mlx5e_select_queue. */
- WRITE_ONCE(priv->num_tc_x_num_ch,
- mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num);
+out:
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
}
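
The pairing described above is the usual publish/consume pattern. A user-space analogue with C11 atomics, as a sketch only (the kernel side relies on smp_wmb() and the implicit ACQUIRE of the TX lock instead):

#include <stdatomic.h>
#include <stdbool.h>

static int txq2sq[64];			/* queue-to-SQ map */
static atomic_bool queues_started;

/* Writer: update the map while the queue is stopped, then publish. */
static void update_map(int qid, int sq)
{
	txq2sq[qid] = sq;
	atomic_store_explicit(&queues_started, true, memory_order_release);
}

/* Reader: the acquire load pairs with the release store above, so a
 * reader that sees queues_started set also sees the map update.
 */
static int read_map(int qid)
{
	while (!atomic_load_explicit(&queues_started, memory_order_acquire))
		;	/* spin until published */
	return txq2sq[qid];
}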
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
- mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
+
+ /* dev_watchdog() wants all TX queues to be started when the carrier is
+ * OK, including the ones in range real_num_tx_queues..num_tx_queues-1.
+ * Make it happy to avoid TX timeout false alarms.
+ */
netif_tx_start_all_queues(priv->netdev);
if (mlx5e_is_vport_rep(priv))
if (mlx5e_is_vport_rep(priv))
mlx5e_remove_sqs_fwd_rules(priv);
- /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
- * polling for inactive tx queues.
+ /* The results of ndo_select_queue are unreliable while the netdev config
+ * is being changed (real_num_tx_queues, num_tc). Stop all queues to
+ * prevent ndo_start_xmit from being called, so that it can assume that
+ * the selected queue is always valid.
*/
- netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+
mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
mlx5e_close_channels(&old_chs);
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
out:
mlx5e_activate_priv_channels(priv);
return mlx5e_switch_priv_params(priv, params, preactivate, context);
new_chs.params = *params;
+
+ mlx5e_selq_prepare(&priv->selq, &new_chs.params, !!priv->htb.maj_id);
+
err = mlx5e_open_channels(priv, &new_chs);
if (err)
- return err;
+ goto err_cancel_selq;
+
err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
if (err)
- mlx5e_close_channels(&new_chs);
+ goto err_close;
+
+ return 0;
+
+err_close:
+ mlx5e_close_channels(&new_chs);
+err_cancel_selq:
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
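
The unwinding above gives the selq update transactional semantics: new parameters are staged before the fallible steps, applied only once the channel switch succeeded, and cancelled on any failure. A minimal standalone sketch of that stage/apply/cancel shape (all names hypothetical):

#include <stdbool.h>

struct selq { int live; int staged; bool pending; };

static void selq_prepare(struct selq *s, int params)
{
	s->staged = params;	/* stage, do not publish yet */
	s->pending = true;
}

static void selq_apply(struct selq *s)
{
	s->live = s->staged;	/* commit the staged parameters */
	s->pending = false;
}

static void selq_cancel(struct selq *s)
{
	s->pending = false;	/* discard the staged parameters */
}

static int switch_params(struct selq *s, int params,
			 int (*open_channels)(void), int (*switch_channels)(void))
{
	int err;

	selq_prepare(s, params);

	err = open_channels();
	if (err)
		goto err_cancel;

	err = switch_channels();
	if (err)
		goto err_cancel;

	selq_apply(s);
	return 0;

err_cancel:
	selq_cancel(s);
	return err;
}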
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
+ mlx5e_selq_prepare(&priv->selq, &priv->channels.params, !!priv->htb.maj_id);
+
set_bit(MLX5E_STATE_OPENED, &priv->state);
err = mlx5e_open_channels(priv, &priv->channels);
goto err_clear_state_opened_flag;
priv->profile->update_rx(priv);
+ mlx5e_selq_apply(&priv->selq);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
err_clear_state_opened_flag:
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ mlx5e_selq_cancel(&priv->selq);
return err;
}
priv->max_nch);
mlx5e_params_mqprio_reset(params);
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
/* SQ */
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
struct net_device *netdev,
struct mlx5_core_dev *mdev)
{
- int nch, num_txqs, node, i;
+ int nch, num_txqs, node;
+ int err;
num_txqs = netdev->num_tx_queues;
nch = mlx5e_calc_max_nch(mdev, netdev, profile);
return -ENOMEM;
mutex_init(&priv->state_lock);
+
+ err = mlx5e_selq_init(&priv->selq, &priv->state_lock);
+ if (err)
+ goto err_free_cpumask;
+
hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
priv->wq = create_singlethread_workqueue("mlx5e");
if (!priv->wq)
- goto err_free_cpumask;
+ goto err_free_selq;
priv->txq2sq = kcalloc_node(num_txqs, sizeof(*priv->txq2sq), GFP_KERNEL, node);
if (!priv->txq2sq)
if (!priv->tx_rates)
goto err_free_txq2sq;
- priv->channel_tc2realtxq =
- kcalloc_node(nch, sizeof(*priv->channel_tc2realtxq), GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq)
- goto err_free_tx_rates;
-
- for (i = 0; i < nch; i++) {
- priv->channel_tc2realtxq[i] =
- kcalloc_node(profile->max_tc, sizeof(**priv->channel_tc2realtxq),
- GFP_KERNEL, node);
- if (!priv->channel_tc2realtxq[i])
- goto err_free_channel_tc2realtxq;
- }
-
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
if (!priv->channel_stats)
- goto err_free_channel_tc2realtxq;
+ goto err_free_tx_rates;
return 0;
-err_free_channel_tc2realtxq:
- while (--i >= 0)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
err_free_tx_rates:
kfree(priv->tx_rates);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
destroy_workqueue(priv->wq);
+err_free_selq:
+ mlx5e_selq_cleanup(&priv->selq);
err_free_cpumask:
free_cpumask_var(priv->scratchpad.cpumask);
return -ENOMEM;
for (i = 0; i < priv->stats_nch; i++)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
- for (i = 0; i < priv->max_nch; i++)
- kfree(priv->channel_tc2realtxq[i]);
- kfree(priv->channel_tc2realtxq);
kfree(priv->tx_rates);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
+ mutex_lock(&priv->state_lock);
+ mlx5e_selq_cleanup(&priv->selq);
+ mutex_unlock(&priv->state_lock);
free_cpumask_var(priv->scratchpad.cpumask);
for (i = 0; i < priv->htb.max_qos_sqs; i++)
}
netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
dev_net_set(netdev, mlx5_core_net(mdev));
return netdev;
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
+#include "en/ptp.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int n, tc, nch, num_sqs = 0;
struct mlx5e_channel *c;
- int n, tc, num_sqs = 0;
int err = -ENOMEM;
+ bool ptp_sq;
u32 *sqs;
- sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
- sizeof(*sqs), GFP_KERNEL);
+ ptp_sq = !!(priv->channels.ptp &&
+ MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
+ nch = priv->channels.num + ptp_sq;
+
+ sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs),
+ GFP_KERNEL);
if (!sqs)
goto out;
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
+ if (ptp_sq) {
+ struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
+
+ for (tc = 0; tc < ptp_ch->num_tc; tc++)
+ sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
+ }
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
params->mqprio.num_tc = 1;
params->tunneled_offload_en = false;
- /* Set an initial non-zero value, so that mlx5e_select_queue won't
- * divide by zero if called before first activating channels.
- */
- priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc;
-
mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode);
}
return err;
}
+ err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+ if (err)
+ goto err_ht_init;
+
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
- goto destroy_tises;
+ goto err_init_tx;
}
return 0;
-destroy_tises:
+err_init_tx:
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+err_ht_init:
mlx5e_destroy_tises(priv);
return err;
}
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
+
+ mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
&MLX5E_STATS_GRP(ipsec_sw),
&MLX5E_STATS_GRP(ipsec_hw),
#endif
+ &MLX5E_STATS_GRP(ptp),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
struct mlx5e_post_act;
struct mlx5_rep_uplink_priv {
- /* Filters DB - instantiated by the uplink representor and shared by
- * the uplink's VFs
- */
- struct rhashtable tc_ht;
-
/* indirect block callbacks are invoked on bind/unbind events
* on registered higher level devices (e.g. tunnel devices)
*
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
+ struct rhashtable tc_ht;
};
static inline
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq->stats->congst_umr++;
-#define UMR_WQE_BULK (2)
- if (likely(missing < UMR_WQE_BULK))
+ if (likely(missing < rq->mpwqe.min_wqe_bulk))
return false;
if (rq->page_pool)
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt)
+ u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
return skb;
}
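
The metasize threaded through the callers below is whatever an XDP program reserved in front of the packet data. A BPF-side sketch of the producing end (not part of this driver): reserve one 32-bit metadata word, which the driver then recovers as xdp.data - xdp.data_meta and attaches to the skb via skb_metadata_set().

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_put_meta(struct xdp_md *ctx)
{
	__u32 *meta, *data;

	/* Grow the metadata area by 4 bytes, in front of the packet. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	data = (void *)(long)ctx->data;
	if (meta + 1 > data)	/* verifier-mandated bounds check */
		return XDP_PASS;

	*meta = 42;		/* arbitrary metadata word */
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";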
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, false);
+ xdp_prepare_buff(xdp, va, headroom, len, true);
}
static struct sk_buff *
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
+ u32 metasize;
va = page_address(di->page) + wi->offset;
data = va + rx_headroom;
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ metasize = xdp.data - xdp.data_meta;
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
+ u32 metasize;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ metasize = xdp.data - xdp.data_meta;
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
if (unlikely(!skb))
return NULL;
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
prefetchw(hdr);
prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
return NULL;
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
-static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
+extern MLX5E_DECLARE_STATS_GRP(ptp);
#endif /* __MLX5_EN_STATS_H__ */
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
+static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
return NULL;
}
+static struct mlx5e_post_act *
+get_post_action(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->post_act;
+ }
+
+ return priv->fs.tc.post_act;
+}
+
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+ free_flow_post_acts(flow);
+
kvfree(attr->parse_attr);
kfree(flow->attr);
}
int out_index;
int err = 0;
+ if (!mlx5e_is_eswitch_flow(flow))
+ return 0;
+
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
*vf_tun = false;
struct mlx5_esw_flow_attr *esw_attr;
int out_index;
+ if (!mlx5e_is_eswitch_flow(flow))
+ return;
+
esw_attr = attr->esw_attr;
*vf_tun = false;
* (1) there's no error
* (2) there's an encap action and we don't have valid neigh
*/
- if (!encap_valid)
+ if (!encap_valid || flow_flag_test(flow, SLOW))
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ free_flow_post_acts(flow);
+
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- return flow->attr->counter;
+ struct mlx5_flow_attr *attr;
+
+ attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
+ return attr->counter;
}
/* Iterate over tmp_list of flows attached to flow_list head. */
static bool
actions_match_supported(struct mlx5e_priv *priv,
struct flow_action *flow_action,
+ u32 actions,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- u32 actions = flow->attr->action;
bool ct_flow, ct_clear;
ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
return (fsystem_guid == psystem_guid);
}
-static int
-parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
- struct flow_action *flow_action)
-{
- struct netlink_ext_ack *extack = parse_state->extack;
- struct mlx5e_tc_flow *flow = parse_state->flow;
- struct mlx5_flow_attr *attr = flow->attr;
- enum mlx5_flow_namespace_type ns_type;
- struct mlx5e_priv *priv = flow->priv;
- const struct flow_action_entry *act;
- struct mlx5e_tc_act *tc_act;
- int err, i;
-
- ns_type = mlx5e_get_flow_namespace(flow);
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act) {
- NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
- return -EOPNOTSUPP;
- }
-
- if (!tc_act->can_offload(parse_state, act, i, attr))
- return -EOPNOTSUPP;
-
- err = tc_act->parse_action(parse_state, act, priv, attr);
- if (err)
- return err;
- }
-
- flow_action_for_each(i, act, flow_action) {
- tc_act = mlx5e_tc_act_get(act->id, ns_type);
- if (!tc_act || !tc_act->post_parse ||
- !tc_act->can_offload(parse_state, act, i, attr))
- continue;
-
- err = tc_act->post_parse(parse_state, priv, attr);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
return 0;
}
+static struct mlx5_flow_attr*
+mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 attr_sz = ns_to_attr_sz(ns_type);
+ struct mlx5_flow_attr *attr2;
+
+ attr2 = mlx5_alloc_flow_attr(ns_type);
+ parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
+ if (!attr2 || !parse_attr) {
+ kvfree(parse_attr);
+ kfree(attr2);
+ return attr2;
+ }
+
+ memcpy(attr2, attr, attr_sz);
+ INIT_LIST_HEAD(&attr2->list);
+ parse_attr->filter_dev = attr->parse_attr->filter_dev;
+ attr2->action = 0;
+ attr2->flags = 0;
+ attr2->parse_attr = parse_attr;
+ return attr2;
+}
+
+static struct mlx5_core_dev *
+get_flow_counter_dev(struct mlx5e_tc_flow *flow)
+{
+ return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
+}
+
+struct mlx5_flow_attr *
+mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int i;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ esw_attr = attr->esw_attr;
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
+ return attr;
+ }
+ }
+
+ return NULL;
+}
+
+void
+mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
+ }
+}
+
+static void
+free_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *tmp;
+ bool vf_tun;
+
+ list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ if (attr->post_act_handle)
+ mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
+
+ clean_encap_dests(flow->priv, flow, attr, &vf_tun);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ mlx5_fc_destroy(counter_dev, attr->counter);
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
+ if (attr->modify_hdr)
+ mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ }
+
+ list_del(&attr->list);
+ kvfree(attr->parse_attr);
+ kfree(attr);
+ }
+}
+
+int
+mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr;
+ int err = 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+/* TC filter rule HW translation:
+ *
+ * +---------------------+
+ * + ft prio (tc chain) +
+ * + original match +
+ * +---------------------+
+ * |
+ * | if multi table action
+ * |
+ * v
+ * +---------------------+
+ * + post act ft |<----.
+ * + match fte id | | split on multi table action
+ * + do actions |-----'
+ * +---------------------+
+ * |
+ * |
+ * v
+ * Do rest of the actions after last multi table action.
+ */
+static int
+alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_post_act *post_act = get_post_action(flow->priv);
+ struct mlx5_flow_attr *attr, *next_attr = NULL;
+ struct mlx5e_post_act_handle *handle;
+ bool vf_tun, encap_valid = true;
+ int err;
+
+ /* The attrs list is walked in reverse order of the actions, as needed:
+ * the first entry is the last attribute.
+ */
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (!next_attr) {
+ /* Set counter action on last post act rule. */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ } else {
+ err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
+ if (err)
+ goto out_free;
+ }
+
+ /* Don't add post_act rule for first attr (last in the list).
+ * It's being handled by the caller.
+ */
+ if (list_is_last(&attr->list, &flow->attrs))
+ break;
+
+ err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
+ if (err)
+ goto out_free;
+
+ if (!encap_valid)
+ flow_flag_set(flow, SLOW);
+
+ err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
+ if (err)
+ goto out_free;
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
+ if (err)
+ goto out_free;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
+ if (err)
+ goto out_free;
+ }
+
+ handle = mlx5e_tc_post_act_add(post_act, attr);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto out_free;
+ }
+
+ attr->post_act_handle = handle;
+ next_attr = attr;
+ }
+
+ if (flow_flag_test(flow, SLOW))
+ goto out;
+
+ err = mlx5e_tc_offload_flow_post_acts(flow);
+ if (err)
+ goto out_free;
+
+out:
+ return 0;
+
+out_free:
+ free_flow_post_acts(flow);
+ return err;
+}
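
To illustrate the split pictured in the diagram above, a standalone sketch (not driver code): the flat action list is cut after every multi-table action except the last one, and each earlier segment executes from the post-act table, which is what parse_tc_actions() below does by cloning the flow attr at each split point.

#include <stdbool.h>
#include <stdio.h>

struct act { const char *name; bool multi_table; };

/* Print which rule segment each action lands in: a new segment starts
 * after every multi-table action that is not the last action.
 */
static void split_actions(const struct act *acts, int n)
{
	int seg = 0;

	for (int i = 0; i < n; i++) {
		printf("segment %d: %s\n", seg, acts[i].name);
		if (acts[i].multi_table && i < n - 1)
			seg++;	/* continue in the post-act table */
	}
}

int main(void)
{
	const struct act acts[] = {
		{ "vlan_pop", false },
		{ "sample", true },	/* multi-table action: split here */
		{ "mirred_redirect", false },
	};

	split_actions(acts, 3);
	return 0;
}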
+
+static int
+parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
+ struct flow_action *flow_action)
+{
+ struct netlink_ext_ack *extack = parse_state->extack;
+ struct mlx5e_tc_flow_action flow_action_reorder;
+ struct mlx5e_tc_flow *flow = parse_state->flow;
+ struct mlx5_flow_attr *attr = flow->attr;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5e_priv *priv = flow->priv;
+ struct flow_action_entry *act, **_act;
+ struct mlx5e_tc_act *tc_act;
+ int err, i;
+
+ flow_action_reorder.num_entries = flow_action->num_entries;
+ flow_action_reorder.entries = kcalloc(flow_action->num_entries,
+ sizeof(*flow_action_reorder.entries), GFP_KERNEL);
+ if (!flow_action_reorder.entries)
+ return -ENOMEM;
+
+ mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
+
+ ns_type = mlx5e_get_flow_namespace(flow);
+ list_add(&attr->list, &flow->attrs);
+
+ flow_action_for_each(i, _act, &flow_action_reorder) {
+ act = *_act;
+ tc_act = mlx5e_tc_act_get(act->id, ns_type);
+ if (!tc_act) {
+ NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ if (!tc_act->can_offload(parse_state, act, i, attr)) {
+ err = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ err = tc_act->parse_action(parse_state, act, priv, attr);
+ if (err)
+ goto out_free;
+
+ parse_state->actions |= attr->action;
+
+ /* Split attr for multi table act if not the last act. */
+ if (tc_act->is_multi_table_act &&
+ tc_act->is_multi_table_act(priv, act, attr) &&
+ i < flow_action_reorder.num_entries - 1) {
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free;
+
+ attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ list_add(&attr->list, &flow->attrs);
+ }
+ }
+
+ kfree(flow_action_reorder.entries);
+
+ err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+ if (err)
+ goto out_free_post_acts;
+
+ err = alloc_flow_post_acts(flow, extack);
+ if (err)
+ goto out_free_post_acts;
+
+ return 0;
+
+out_free:
+ kfree(flow_action_reorder.entries);
+out_free_post_acts:
+ free_flow_post_acts(flow);
+
+ return err;
+}
+
static int
flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
if (err)
return err;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
+ if (!actions_match_supported(priv, flow_action, parse_state->actions,
+ parse_attr, flow, extack))
return -EOPNOTSUPP;
return 0;
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
unsigned long flags)
{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_rep_priv *rpriv;
if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- return &uplink_rpriv->uplink_priv.tc_ht;
+ rpriv = priv->ppriv;
+ return &rpriv->tc_ht;
} else /* NIC offload */
return &priv->fs.tc.ht;
}
sizeof(struct mlx5_nic_flow_attr);
struct mlx5_flow_attr *attr;
- return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+ if (!attr)
+ return attr;
+
+ INIT_LIST_HEAD(&attr->list);
+ return attr;
}
static int
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
+ INIT_LIST_HEAD(&flow->attrs);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
mlx5_chains_destroy(tc->chains);
}
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
+{
+ int err;
+
+ err = rhashtable_init(tc_ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ return 0;
+}
+
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
+{
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+}
+
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
- struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
struct mlx5_eswitch *esw;
u64 mapping_id;
int err = 0;
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
priv = netdev_priv(rpriv->netdev);
esw = priv->mdev->priv.eswitch;
}
uplink_priv->tunnel_enc_opts_mapping = mapping;
- err = rhashtable_init(tc_ht, &tc_ht_params);
- if (err)
- goto err_ht_init;
-
- lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
-
uplink_priv->encap = mlx5e_tc_tun_init(priv);
if (IS_ERR(uplink_priv->encap)) {
err = PTR_ERR(uplink_priv->encap);
return 0;
err_register_fib_notifier:
- rhashtable_destroy(tc_ht);
-err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
return err;
}
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
- struct mlx5_rep_uplink_priv *uplink_priv;
-
- uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
-
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
mlx5e_tc_tun_cleanup(uplink_priv->encap);
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
-
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
+ struct list_head list;
+ struct mlx5e_post_act_handle *post_act_handle;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
-int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
-void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
+void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
+
+int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
+void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
+static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
}
}
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
-{
- int dscp_cp = 0;
-
- if (skb->protocol == htons(ETH_P_IP))
- dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- else if (skb->protocol == htons(ETH_P_IPV6))
- dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-
- return priv->dcbx_dp.dscp2prio[dscp_cp];
-}
-#endif
-
-static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int up = 0;
-
- if (!netdev_get_num_tc(dev))
- goto return_txq;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
-return_txq:
- return priv->port_ptp_tc2realtxq[up];
-}
-
-static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
- u16 htb_maj_id)
-{
- u16 classid;
-
- if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
- classid = TC_H_MIN(skb->priority);
- else
- classid = READ_ONCE(priv->htb.defcls);
-
- if (!classid)
- return 0;
-
- return mlx5e_get_txq_by_classid(priv, classid);
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- int num_tc_x_num_ch;
- int txq_ix;
- int up = 0;
- int ch_ix;
-
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
- if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
- struct mlx5e_ptp *ptp_channel;
-
- /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
- u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
-
- if (unlikely(htb_maj_id)) {
- txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
- if (txq_ix > 0)
- return txq_ix;
- }
-
- ptp_channel = READ_ONCE(priv->channels.ptp);
- if (unlikely(ptp_channel &&
- test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
- mlx5e_use_ptpsq(skb)))
- return mlx5e_select_ptpsq(dev, skb);
-
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
- * If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq()
- * and mlx5e_select_htb_queue().
- */
- if (unlikely(txq_ix >= num_tc_x_num_ch))
- txq_ix %= num_tc_x_num_ch;
- } else {
- txq_ix = netdev_pick_tx(dev, skb, NULL);
- }
-
- if (!netdev_get_num_tc(dev))
- return txq_ix;
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
- up = mlx5e_get_dscp_up(priv, skb);
- else
-#endif
- if (skb_vlan_tag_present(skb))
- up = skb_vlan_tag_get_prio(skb);
-
- /* Normalize any picked txq_ix to [0, num_channels),
- * So we can return a txq_ix that matches the channel and
- * packet UP.
- */
- ch_ix = priv->txq2sq[txq_ix]->ch_ix;
-
- return priv->channel_tc2realtxq[ch_ix][up];
-}
-
static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
{
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ pi = mlx5e_txqsq_get_next_pi(sq, sq->max_sq_mpw_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
net_prefetchw(wqe->data);
mlx5e_tx_skb_update_hwts_flags(skb);
- if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) {
/* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
cseg = mlx5e_tx_mpwqe_session_complete(sq);
struct mlx5e_txqsq *sq;
u16 pi;
+ /* All changes to txq2sq are performed in sync with mlx5e_xmit, when the
+ * queue being changed is disabled, and smp_wmb guarantees that the
+ * changes are visible before mlx5e_xmit tries to read from txq2sq. It
+ * guarantees that the value of txq2sq[qid] doesn't change while
+ * mlx5e_xmit is running on queue number qid. smp_wmb is paired with
+ * HARD_TX_LOCK around ndo_start_xmit, which serves as an ACQUIRE.
+ */
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
if (unlikely(!sq)) {
+ /* Two cases when sq can be NULL:
+ * 1. The HTB node is registered, and mlx5e_select_queue
+ * selected its queue ID, but the SQ itself is not yet created.
+ * 2. HTB SQ creation failed. Similar to the previous case, but
+ * the SQ won't be created.
+ */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
return NULL;
- if (!vid ||
- br_vlan_get_info(br_dev, vid, &vinfo) ||
- !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
+ if (!vid || br_vlan_get_info(br_dev, vid, &vinfo))
return NULL;
edev = br_fdb_find_port(br_dev, dmac, vid);
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
- if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev))
+ if (br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
#include "lan743x_main.h"
#include "lan743x_ethtool.h"
+#define MMD_ACCESS_ADDRESS 0
+#define MMD_ACCESS_WRITE 1
+#define MMD_ACCESS_READ 2
+#define MMD_ACCESS_READ_INC 3
+
+static void pci11x1x_strap_get_status(struct lan743x_adapter *adapter)
+{
+ u32 chip_rev;
+ u32 strap;
+
+ strap = lan743x_csr_read(adapter, STRAP_READ);
+ if (strap & STRAP_READ_USE_SGMII_EN_) {
+ if (strap & STRAP_READ_SGMII_EN_)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "STRAP_READ: 0x%08X\n", strap);
+ } else {
+ chip_rev = lan743x_csr_read(adapter, FPGA_REV);
+ if (chip_rev) {
+ if (chip_rev & FPGA_SGMII_OP)
+ adapter->is_sgmii_en = true;
+ else
+ adapter->is_sgmii_en = false;
+ netif_dbg(adapter, drv, adapter->netdev,
+ "FPGA_REV: 0x%08X\n", chip_rev);
+ } else {
+ adapter->is_sgmii_en = false;
+ }
+ }
+}
+
+static bool is_pci11x1x_chip(struct lan743x_adapter *adapter)
+{
+ struct lan743x_csr *csr = &adapter->csr;
+ u32 id_rev = csr->id_rev;
+
+ if (((id_rev & 0xFFFF0000) == ID_REV_ID_A011_) ||
+ ((id_rev & 0xFFFF0000) == ID_REV_ID_A041_)) {
+ return true;
+ }
+ return false;
+}
+
static void lan743x_pci_cleanup(struct lan743x_adapter *adapter)
{
pci_release_selected_regions(adapter->pdev,
}
}
if (int_sts & INT_BIT_ALL_TX_) {
- for (channel = 0; channel < LAN743X_USED_TX_CHANNELS;
+ for (channel = 0; channel < adapter->used_tx_channels;
channel++) {
u32 int_bit = INT_BIT_DMA_TX_(channel);
{
int index;
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < adapter->max_vector_count; index++) {
if (adapter->intr.vector_list[index].int_mask & int_mask)
return adapter->intr.vector_list[index].flags;
}
int index = 0;
lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_);
- lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
+ if (adapter->is_pci11x1x)
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x0000FFFF);
+ else
+ lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) {
+ for (index = 0; index < intr->number_of_vectors; index++) {
if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) {
lan743x_intr_unregister_isr(adapter, index);
intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index);
static int lan743x_intr_open(struct lan743x_adapter *adapter)
{
- struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT];
+ struct msix_entry msix_entries[PCI11X1X_MAX_VECTOR_COUNT];
struct lan743x_intr *intr = &adapter->intr;
+ unsigned int used_tx_channels;
u32 int_vec_en_auto_clr = 0;
+ u8 max_vector_count;
u32 int_vec_map0 = 0;
u32 int_vec_map1 = 0;
int ret = -ENODEV;
intr->number_of_vectors = 0;
/* Try to set up MSIX interrupts */
+ max_vector_count = adapter->max_vector_count;
memset(&msix_entries[0], 0,
- sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT);
- for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++)
+ sizeof(struct msix_entry) * max_vector_count);
+ for (index = 0; index < max_vector_count; index++)
msix_entries[index].entry = index;
+ used_tx_channels = adapter->used_tx_channels;
ret = pci_enable_msix_range(adapter->pdev,
msix_entries, 1,
- 1 + LAN743X_USED_TX_CHANNELS +
+ 1 + used_tx_channels +
LAN743X_USED_RX_CHANNELS);
if (ret > 0) {
lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD);
lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD);
- lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
- lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ if (adapter->is_pci11x1x) {
+ lan743x_csr_write(adapter, INT_MOD_CFG8, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_CFG9, LAN743X_INT_MOD);
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00007654);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00003210);
+ } else {
+ lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432);
+ lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001);
+ }
lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF);
}
if (intr->number_of_vectors > 1) {
int number_of_tx_vectors = intr->number_of_vectors - 1;
- if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS)
- number_of_tx_vectors = LAN743X_USED_TX_CHANNELS;
+ if (number_of_tx_vectors > used_tx_channels)
+ number_of_tx_vectors = used_tx_channels;
flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK |
INT_VEC_EN_(vector));
}
}
- if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) {
+ if ((intr->number_of_vectors - used_tx_channels) > 1) {
int number_of_rx_vectors = intr->number_of_vectors -
- LAN743X_USED_TX_CHANNELS - 1;
+ used_tx_channels - 1;
if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS)
number_of_rx_vectors = LAN743X_USED_RX_CHANNELS;
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
}
for (index = 0; index < number_of_rx_vectors; index++) {
- int vector = index + 1 + LAN743X_USED_TX_CHANNELS;
+ int vector = index + 1 + used_tx_channels;
u32 int_bit = INT_BIT_DMA_RX_(index);
/* map RX interrupt to vector */
return ret;
}
+static u32 lan743x_mac_mmd_access(int id, int index, int op)
+{
+ u16 dev_addr;
+ u32 ret;
+
+ dev_addr = (index >> 16) & 0x1f;
+ ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
+ MAC_MII_ACC_PHY_ADDR_MASK_;
+ ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
+ MAC_MII_ACC_MIIMMD_MASK_;
+ if (op == MMD_ACCESS_WRITE)
+ ret |= MAC_MII_ACC_MIICMD_WRITE_;
+ else if (op == MMD_ACCESS_READ)
+ ret |= MAC_MII_ACC_MIICMD_READ_;
+ else if (op == MMD_ACCESS_READ_INC)
+ ret |= MAC_MII_ACC_MIICMD_READ_INC_;
+ else
+ ret |= MAC_MII_ACC_MIICMD_ADDR_;
+ ret |= (MAC_MII_ACC_MII_BUSY_ | MAC_MII_ACC_MIICL45_);
+
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
+ }
+
+ ret = lan743x_mdiobus_read(bus, phy_id, index);
+ return ret;
+}
+
+static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
+{
+ struct lan743x_adapter *adapter = bus->priv;
+ u32 mmd_access;
+ int ret;
+
+ /* confirm MII not busy */
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ if (index & MII_ADDR_C45) {
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, index,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ } else {
+ ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
+ }
+
+ return ret;
+}
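
For reference, the Clause 45 'index' consumed by the two handlers above packs the MMD device address into bits 16..20 and the register number into bits 0..15, with MII_ADDR_C45 marking the access as Clause 45. A minimal sketch of a caller, assuming the standard mdiobus_c45_addr() helper from include/linux/mdio.h (the example function itself is hypothetical):

#include <linux/mdio.h>

/* Read MDIO_CTRL1 from the PCS MMD via the Clause 45 path above. */
static int example_read_pcs_ctrl(struct mii_bus *bus, int phy_id)
{
	return lan743x_mdiobus_c45_read(bus, phy_id,
					mdiobus_c45_addr(MDIO_MMD_PCS, MDIO_CTRL1));
}
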
+
static void lan743x_mac_set_address(struct lan743x_adapter *adapter,
u8 *addr)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
int index;
- lan743x_tx_close(&adapter->tx[0]);
+ for (index = 0; index < adapter->used_tx_channels; index++)
+ lan743x_tx_close(&adapter->tx[index]);
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++)
lan743x_rx_close(&adapter->rx[index]);
goto close_rx;
}
- ret = lan743x_tx_open(&adapter->tx[0]);
- if (ret)
- goto close_rx;
-
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ ret = lan743x_tx_open(&adapter->tx[index]);
+ if (ret)
+ goto close_tx;
+ }
return 0;
+close_tx:
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ if (adapter->tx[index].ring_cpu_ptr)
+ lan743x_tx_close(&adapter->tx[index]);
+ }
+
close_rx:
for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
if (adapter->rx[index].ring_cpu_ptr)
struct net_device *netdev)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u8 ch = 0;
- return lan743x_tx_xmit_frame(&adapter->tx[0], skb);
+ if (adapter->is_pci11x1x)
+ ch = skb->queue_mapping % PCI11X1X_USED_TX_CHANNELS;
+
+ return lan743x_tx_xmit_frame(&adapter->tx[ch], skb);
}
static int lan743x_netdev_ioctl(struct net_device *netdev,
int index;
int ret;
+ adapter->is_pci11x1x = is_pci11x1x_chip(adapter);
+ if (adapter->is_pci11x1x) {
+ adapter->max_tx_channels = PCI11X1X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = PCI11X1X_USED_TX_CHANNELS;
+ adapter->max_vector_count = PCI11X1X_MAX_VECTOR_COUNT;
+ pci11x1x_strap_get_status(adapter);
+ } else {
+ adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS;
+ adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS;
+ adapter->max_vector_count = LAN743X_MAX_VECTOR_COUNT;
+ }
+
adapter->intr.irq = adapter->pdev->irq;
lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
adapter->rx[index].channel_number = index;
}
- tx = &adapter->tx[0];
- tx->adapter = adapter;
- tx->channel_number = 0;
- spin_lock_init(&tx->ring_lock);
+ for (index = 0; index < adapter->used_tx_channels; index++) {
+ tx = &adapter->tx[index];
+ tx->adapter = adapter;
+ tx->channel_number = index;
+ spin_lock_init(&tx->ring_lock);
+ }
+
return 0;
}
static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
{
+ u32 sgmii_ctl;
int ret;
adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
}
adapter->mdiobus->priv = (void *)adapter;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
- adapter->mdiobus->name = "lan743x-mdiobus";
+ if (adapter->is_pci11x1x) {
+ if (adapter->is_sgmii_en) {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "SGMII operation\n");
+ } else {
+ sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL);
+ sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_;
+ sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_;
+ lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "(R)GMII operation\n");
+ }
+
+ adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
+ adapter->mdiobus->read = lan743x_mdiobus_c45_read;
+ adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->name = "lan743x-mdiobus-c45";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n");
+ } else {
+ adapter->mdiobus->read = lan743x_mdiobus_read;
+ adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->name = "lan743x-mdiobus";
+ netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
+ }
+
snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE,
"pci-%s", pci_name(adapter->pdev));
struct net_device *netdev = NULL;
int ret = -ENODEV;
- netdev = devm_alloc_etherdev(&pdev->dev,
- sizeof(struct lan743x_adapter));
+ if (id->device == PCI_DEVICE_ID_SMSC_A011 ||
+ id->device == PCI_DEVICE_ID_SMSC_A041) {
+ netdev = devm_alloc_etherdev_mqs(&pdev->dev,
+ sizeof(struct lan743x_adapter),
+ PCI11X1X_USED_TX_CHANNELS,
+ LAN743X_USED_RX_CHANNELS);
+ } else {
+ netdev = devm_alloc_etherdev(&pdev->dev,
+ sizeof(struct lan743x_adapter));
+ }
+
if (!netdev)
goto return_error;
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A011) },
+ { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_A041) },
{ 0, }
};
#define ID_REV_ID_MASK_ (0xFFFF0000)
#define ID_REV_ID_LAN7430_ (0x74300000)
#define ID_REV_ID_LAN7431_ (0x74310000)
-#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
- (((id_rev) & 0xFFF00000) == 0x74300000)
+#define ID_REV_ID_LAN743X_ (0x74300000)
+#define ID_REV_ID_A011_ (0xA0110000) // PCI11010
+#define ID_REV_ID_A041_ (0xA0410000) // PCI11414
+#define ID_REV_ID_A0X1_ (0xA0010000)
+#define ID_REV_IS_VALID_CHIP_ID_(id_rev) \
+ ((((id_rev) & 0xFFF00000) == ID_REV_ID_LAN743X_) || \
+ (((id_rev) & 0xFF0F0000) == ID_REV_ID_A0X1_))
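+/* Editor's note - worked example of the widened check above:
+ *   PCI11010: 0xA0110000 & 0xFF0F0000 = 0xA0010000 == ID_REV_ID_A0X1_
+ *   PCI11414: 0xA0410000 & 0xFF0F0000 = 0xA0010000 == ID_REV_ID_A0X1_
+ *   LAN7430:  0x74300000 & 0xFFF00000 = 0x74300000 == ID_REV_ID_LAN743X_
+ * The 0xFF0F0000 mask ignores the nibble in which the two PCI11x1x
+ * parts differ, so one comparison covers both.
+ */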
#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF)
#define ID_REV_CHIP_REV_A0_ (0x00000000)
#define ID_REV_CHIP_REV_B0_ (0x00000010)
#define FPGA_REV (0x04)
#define FPGA_REV_GET_MINOR_(fpga_rev) (((fpga_rev) >> 8) & 0x000000FF)
#define FPGA_REV_GET_MAJOR_(fpga_rev) ((fpga_rev) & 0x000000FF)
+#define FPGA_SGMII_OP BIT(24)
+
+#define STRAP_READ (0x0C)
+#define STRAP_READ_USE_SGMII_EN_ BIT(22)
+#define STRAP_READ_SGMII_EN_ BIT(6)
+#define STRAP_READ_SGMII_REFCLK_ BIT(5)
+#define STRAP_READ_SGMII_2_5G_ BIT(4)
+#define STRAP_READ_BASE_X_ BIT(3)
+#define STRAP_READ_RGMII_TXC_DELAY_EN_ BIT(2)
+#define STRAP_READ_RGMII_RXC_DELAY_EN_ BIT(1)
+#define STRAP_READ_ADV_PM_DISABLE_ BIT(0)
#define HW_CFG (0x010)
#define HW_CFG_RELOAD_TYPE_ALL_ (0x00000FC0)
#define MAC_RX_ADDRL (0x11C)
#define MAC_MII_ACC (0x120)
+#define MAC_MII_ACC_MDC_CYCLE_SHIFT_ (16)
+#define MAC_MII_ACC_MDC_CYCLE_MASK_ (0x00070000)
+#define MAC_MII_ACC_MDC_CYCLE_2_5MHZ_ (0)
+#define MAC_MII_ACC_MDC_CYCLE_5MHZ_ (1)
+#define MAC_MII_ACC_MDC_CYCLE_12_5MHZ_ (2)
+#define MAC_MII_ACC_MDC_CYCLE_25MHZ_ (3)
+#define MAC_MII_ACC_MDC_CYCLE_1_25MHZ_ (4)
#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11)
#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800)
#define MAC_MII_ACC_MIIRINDA_SHIFT_ (6)
#define MAC_MII_ACC_MII_WRITE_ (0x00000002)
#define MAC_MII_ACC_MII_BUSY_ BIT(0)
+#define MAC_MII_ACC_MIIMMD_SHIFT_ (6)
+#define MAC_MII_ACC_MIIMMD_MASK_ (0x000007C0)
+#define MAC_MII_ACC_MIICL45_ BIT(3)
+#define MAC_MII_ACC_MIICMD_MASK_ (0x00000006)
+#define MAC_MII_ACC_MIICMD_ADDR_ (0x00000000)
+#define MAC_MII_ACC_MIICMD_WRITE_ (0x00000002)
+#define MAC_MII_ACC_MIICMD_READ_ (0x00000004)
+#define MAC_MII_ACC_MIICMD_READ_INC_ (0x00000006)
+
#define MAC_MII_DATA (0x124)
#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130)
#define MAC_WUCSR2 (0x600)
+#define SGMII_CTL (0x728)
+#define SGMII_CTL_SGMII_ENABLE_ BIT(31)
+#define SGMII_CTL_LINK_STATUS_SOURCE_ BIT(8)
+#define SGMII_CTL_SGMII_POWER_DN_ BIT(1)
+
#define INT_STS (0x780)
#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel))
#define INT_BIT_ALL_RX_ (0x0F000000)
#define INT_MOD_CFG5 (0x7D4)
#define INT_MOD_CFG6 (0x7D8)
#define INT_MOD_CFG7 (0x7DC)
+#define INT_MOD_CFG8 (0x7E0)
+#define INT_MOD_CFG9 (0x7E4)
#define PTP_CMD_CTL (0x0A00)
#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6)
#define LAN743X_MAX_RX_CHANNELS (4)
#define LAN743X_MAX_TX_CHANNELS (1)
+#define PCI11X1X_MAX_TX_CHANNELS (4)
struct lan743x_adapter;
#define LAN743X_USED_RX_CHANNELS (4)
#define LAN743X_USED_TX_CHANNELS (1)
+#define PCI11X1X_USED_TX_CHANNELS (4)
#define LAN743X_INT_MOD (400)
#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
#error Invalid LAN743X_USED_TX_CHANNELS
#endif
+#if (PCI11X1X_USED_TX_CHANNELS > PCI11X1X_MAX_TX_CHANNELS)
+#error Invalid PCI11X1X_USED_TX_CHANNELS
+#endif
/* PCI */
/* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
#define PCI_VENDOR_ID_SMSC PCI_VENDOR_ID_EFAR
#define PCI_DEVICE_ID_SMSC_LAN7430 (0x7430)
#define PCI_DEVICE_ID_SMSC_LAN7431 (0x7431)
+#define PCI_DEVICE_ID_SMSC_A011 (0xA011)
+#define PCI_DEVICE_ID_SMSC_A041 (0xA041)
#define PCI_CONFIG_LENGTH (0x1000)
};
#define LAN743X_MAX_VECTOR_COUNT (8)
+#define PCI11X1X_MAX_VECTOR_COUNT (16)
struct lan743x_intr {
int flags;
unsigned int irq;
- struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT];
+ struct lan743x_vector vector_list[PCI11X1X_MAX_VECTOR_COUNT];
int number_of_vectors;
bool using_vectors;
u8 mac_address[ETH_ALEN];
struct lan743x_phy phy;
- struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
- struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+ struct lan743x_tx tx[PCI11X1X_USED_TX_CHANNELS];
+ struct lan743x_rx rx[LAN743X_USED_RX_CHANNELS];
+ bool is_pci11x1x;
+ bool is_sgmii_en;
+ u8 max_tx_channels;
+ u8 used_tx_channels;
+ u8 max_vector_count;
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
- index++)
+ for (index = 0; index < adapter->used_tx_channels;
+ index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
false, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ON:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, false);
lan743x_ptp_set_sync_ts_insert(adapter, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- for (index = 0; index < LAN743X_MAX_TX_CHANNELS;
+ for (index = 0; index < adapter->used_tx_channels;
index++)
lan743x_tx_set_timestamping_mode(&adapter->tx[index],
true, true);
config LAN966X_SWITCH
tristate "Lan966x switch driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on HAS_IOMEM
depends on OF
depends on NET_SWITCHDEV
ip_hdr(skb)->protocol == IPPROTO_IGMP)
return false;
- if (skb->protocol == htons(ETH_P_IPV6) &&
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6) &&
ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) &&
!ipv6_mc_check_mld(skb))
return false;
const struct switchdev_obj_port_vlan *v = SWITCHDEV_OBJ_PORT_VLAN(obj);
struct lan966x *lan966x = port->lan966x;
- /* When adding a port to a vlan, we get a callback for the port but
- * also for the bridge. When get the callback for the bridge just bail
- * out. Then when the bridge is added to the vlan, then we get a
- * callback here but in this case the flags has set:
- * BRIDGE_VLAN_INFO_BRENTRY. In this case it means that the CPU
- * port is added to the vlan, so the broadcast frames and unicast frames
- * with dmac of the bridge should be foward to CPU.
- */
- if (netif_is_bridge_master(obj->orig_dev) &&
- !(v->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
if (!netif_is_bridge_master(obj->orig_dev))
lan966x_vlan_port_add_vlan(port, v->vid,
v->flags & BRIDGE_VLAN_INFO_PVID,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
- for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
struct sparx5_port *port = netdev_priv(dev);
if (netif_is_bridge_master(dev)) {
- if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
- struct sparx5 *sparx5 =
- container_of(nb, struct sparx5,
- switchdev_blocking_nb);
+ struct sparx5 *sparx5 =
+ container_of(nb, struct sparx5,
+ switchdev_blocking_nb);
- sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
- }
+ sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
return 0;
}
}
priv->tx_buf_base = kmalloc_array(priv->tx_buf_size, TX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->tx_buf_base) {
ret = -ENOMEM;
goto init_fail;
}
priv->rx_buf_base = kmalloc_array(priv->rx_buf_size, RX_DESC_NUM,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!priv->rx_buf_base) {
ret = -ENOMEM;
goto init_fail;
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
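
For context on the take_ts flag threaded through ocelot_trap_add() below: IEEE 1588 event messages need hardware timestamps while general messages do not, and the two classes arrive on different UDP ports.

/* PTP over UDP (values standardized by IEEE 1588, noted for reference):
 *   event messages   (Sync, Delay_Req, ...)     -> port 319 (PTP_EV_PORT),  take_ts = true
 *   general messages (Announce, Follow_Up, ...) -> port 320 (PTP_GEN_PORT), take_ts = false
 */
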
-static int ocelot_trap_add(struct ocelot *ocelot, int port,
- unsigned long cookie,
- void (*populate)(struct ocelot_vcap_filter *f))
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f))
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
trap->action.cpu_copy_ena = true;
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
+ trap->take_ts = take_ts;
+ list_add_tail(&trap->trap_list, &ocelot->traps);
new = true;
}
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
kfree(trap);
+ }
return err;
}
return 0;
}
-static int ocelot_trap_del(struct ocelot *ocelot, int port,
- unsigned long cookie)
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
{
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
return 0;
trap->ingress_port_mask &= ~BIT(port);
- if (!trap->ingress_port_mask)
+ if (!trap->ingress_port_mask) {
+ list_del(&trap->trap_list);
+
return ocelot_vcap_filter_del(ocelot, trap);
+ }
return ocelot_vcap_filter_replace(ocelot, trap);
}
static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
- return ocelot_trap_add(ocelot, port, l2_cookie,
+ return ocelot_trap_add(ocelot, port, l2_cookie, true,
ocelot_populate_l2_ptp_trap_key);
}
static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+ unsigned long l2_cookie = OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot);
return ocelot_trap_del(ocelot, port, l2_cookie);
}
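
The OCELOT_VCAP_IS2_* cookies replace the old ad-hoc "num_phys_ports + N" values, which could collide between trap classes (the MRP redirect rules already used "num_phys_ports + port"). The exact definitions live in soc/mscc/ocelot_vcap.h and are not part of this excerpt; a plausible allocation, shown purely as an assumption, is:

/* Assumed cookie layout (illustrative only, not verbatim from the
 * header): per-port rules take the low slots, singleton traps follow.
 */
#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)	((ocelot)->num_phys_ports + (port))
#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot)		((ocelot)->num_phys_ports * 2)
#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot)		((ocelot)->num_phys_ports * 2 + 1)
#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot)	((ocelot)->num_phys_ports * 2 + 2)
#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot)	((ocelot)->num_phys_ports * 2 + 3)
#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot)	((ocelot)->num_phys_ports * 2 + 4)
#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot)	((ocelot)->num_phys_ports * 2 + 5)
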
static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie, true,
ocelot_populate_ipv4_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie, false,
ocelot_populate_ipv4_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
- unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ unsigned long ipv4_gen_cookie = OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv4_ev_cookie = OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
- err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie, true,
ocelot_populate_ipv6_ptp_event_trap_key);
if (err)
return err;
- err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie, false,
ocelot_populate_ipv6_ptp_general_trap_key);
if (err)
ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
{
- unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
- unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ unsigned long ipv6_gen_cookie = OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot);
+ unsigned long ipv6_ev_cookie = OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot);
int err;
err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
}
EXPORT_SYMBOL(ocelot_get_strings);
-static void ocelot_update_stats(struct ocelot *ocelot)
+/* Caller must hold &ocelot->stats_lock */
+static int ocelot_port_update_stats(struct ocelot *ocelot, int port)
{
- int i, j;
-
- mutex_lock(&ocelot->stats_lock);
+ unsigned int idx = port * ocelot->num_stats;
+ struct ocelot_stats_region *region;
+ int err, j;
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- /* Configure the port to read the stats from */
- ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
+ /* Configure the port to read the stats from */
+ ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port), SYS_STAT_CFG);
- for (j = 0; j < ocelot->num_stats; j++) {
- u32 val;
- unsigned int idx = i * ocelot->num_stats + j;
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ err = ocelot_bulk_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
+ region->offset, region->buf,
+ region->count);
+ if (err)
+ return err;
- val = ocelot_read_rix(ocelot, SYS_COUNT_RX_OCTETS,
- ocelot->stats_layout[j].offset);
+ for (j = 0; j < region->count; j++) {
+ u64 *stat = &ocelot->stats[idx + j];
+ u64 val = region->buf[j];
- if (val < (ocelot->stats[idx] & U32_MAX))
- ocelot->stats[idx] += (u64)1 << 32;
+ if (val < (*stat & U32_MAX))
+ *stat += (u64)1 << 32;
- ocelot->stats[idx] = (ocelot->stats[idx] &
- ~(u64)U32_MAX) + val;
+ *stat = (*stat & ~(u64)U32_MAX) + val;
}
+
+ idx += region->count;
}
- mutex_unlock(&ocelot->stats_lock);
+ return err;
}
static void ocelot_check_stats_work(struct work_struct *work)
struct delayed_work *del_work = to_delayed_work(work);
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+ int i, err;
+
+ mutex_lock(&ocelot->stats_lock);
+ for (i = 0; i < ocelot->num_phys_ports; i++) {
+ err = ocelot_port_update_stats(ocelot, i);
+ if (err)
+ break;
+ }
+ mutex_unlock(&ocelot->stats_lock);
- ocelot_update_stats(ocelot);
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
- int i;
+ int i, err;
+
+ mutex_lock(&ocelot->stats_lock);
/* check and update now */
- ocelot_update_stats(ocelot);
+ err = ocelot_port_update_stats(ocelot, port);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+ mutex_unlock(&ocelot->stats_lock);
+
+ if (err)
+ dev_err(ocelot->dev, "Error %d updating ethtool stats\n", err);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
}
EXPORT_SYMBOL(ocelot_get_sset_count);
+static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
+{
+ struct ocelot_stats_region *region = NULL;
+ unsigned int last;
+ int i;
+
+ INIT_LIST_HEAD(&ocelot->stats_regions);
+
+ for (i = 0; i < ocelot->num_stats; i++) {
+ if (region && ocelot->stats_layout[i].offset == last + 1) {
+ region->count++;
+ } else {
+ region = devm_kzalloc(ocelot->dev, sizeof(*region),
+ GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+
+ region->offset = ocelot->stats_layout[i].offset;
+ region->count = 1;
+ list_add_tail(&region->node, &ocelot->stats_regions);
+ }
+
+ last = ocelot->stats_layout[i].offset;
+ }
+
+ list_for_each_entry(region, &ocelot->stats_regions, node) {
+ region->buf = devm_kcalloc(ocelot->dev, region->count,
+ sizeof(*region->buf), GFP_KERNEL);
+ if (!region->buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
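
ocelot_prepare_stats_regions() coalesces counters whose hardware offsets are consecutive into bulk-read regions, so each refresh issues one regmap transaction per contiguous run rather than one per counter. A worked example with a hypothetical layout:

/* Hypothetical stats layout -> regions produced by the loop above:
 *   offsets { 0x00, 0x01, 0x02, 0x10, 0x11 }
 *   region 1: offset 0x00, count 3
 *   region 2: offset 0x10, count 2
 * i.e. two bulk reads per port instead of five single reads.
 */
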
+
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info)
{
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
+ ret = ocelot_prepare_stats_regions(ocelot);
+ if (ret) {
+ destroy_workqueue(ocelot->stats_queue);
+ destroy_workqueue(ocelot->owq);
+ return ret;
+ }
+
INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ana.h>
#include <soc/mscc/ocelot_ptp.h>
+#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot.h>
#include "ocelot_rew.h"
#include "ocelot_qs.h"
enum devlink_port_flavour flavour);
void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
+int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie, bool take_ts,
+ void (*populate)(struct ocelot_vcap_filter *f));
+int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
filter->action.cpu_copy_ena = true;
filter->action.cpu_qu_num = 0;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ list_add_tail(&filter->trap_list, &ocelot->traps);
break;
case FLOW_ACTION_POLICE:
if (filter->block_id == PSFP_BLOCK_ID) {
ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
+ if (!list_empty(&filter->trap_list))
+ list_del(&filter->trap_list);
kfree(filter);
return ret;
}
#include "ocelot.h"
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count)
+{
+ u16 target = reg >> TARGET_OFFSET;
+
+ WARN_ON(!target);
+
+ return regmap_bulk_read(ocelot->targets[target],
+ ocelot->map[target][reg & REG_MASK] + offset,
+ buf, count);
+}
+EXPORT_SYMBOL_GPL(__ocelot_bulk_read_ix);
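+/* Editor's note: register IDs here encode the regmap target in the
+ * high bits (reg >> TARGET_OFFSET) and the per-target register index
+ * in the low bits (reg & REG_MASK); 'offset' selects a replicated
+ * instance. A run of N consecutive counters therefore costs a single
+ * regmap_bulk_read() instead of N MMIO reads.
+ */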
+
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset)
{
u16 target = reg >> TARGET_OFFSET;
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
filter->prio = 1;
- filter->id.cookie = src_port;
+ filter->id.cookie = OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, src_port);
filter->id.tc_offload = false;
filter->block_id = VCAP_IS2;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
return err;
}
-static int ocelot_mrp_copy_add_vcap(struct ocelot *ocelot, int port,
- int prio, unsigned long cookie)
+static void ocelot_populate_mrp_trap_key(struct ocelot_vcap_filter *filter)
{
const u8 mrp_mask[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
- struct ocelot_vcap_filter *filter;
- int err;
-
- filter = kzalloc(sizeof(*filter), GFP_KERNEL);
- if (!filter)
- return -ENOMEM;
- filter->key_type = OCELOT_VCAP_KEY_ETYPE;
- filter->prio = prio;
- filter->id.cookie = cookie;
- filter->id.tc_offload = false;
- filter->block_id = VCAP_IS2;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
- filter->ingress_port_mask = BIT(port);
/* Here is possible to use control or test dmac because the mask
* doesn't cover the LSB
*/
ether_addr_copy(filter->key.etype.dmac.value, mrp_test_dmac);
ether_addr_copy(filter->key.etype.dmac.mask, mrp_mask);
- filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
- filter->action.port_mask = 0x0;
- filter->action.cpu_copy_ena = true;
- filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+}
- err = ocelot_vcap_filter_add(ocelot, filter, NULL);
- if (err)
- kfree(filter);
+static int ocelot_mrp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
- return err;
+ return ocelot_trap_add(ocelot, port, cookie, false,
+ ocelot_populate_mrp_trap_key);
+}
+
+static int ocelot_mrp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS2_MRP_TRAP(ocelot);
+
+ return ocelot_trap_del(ocelot, port, cookie);
}
static void ocelot_mrp_save_mac(struct ocelot *ocelot,
ocelot_mrp_save_mac(ocelot, ocelot_port);
if (mrp->ring_role != BR_MRP_RING_ROLE_MRC)
- return ocelot_mrp_copy_add_vcap(ocelot, port, 1, port);
+ return ocelot_mrp_trap_add(ocelot, port);
dst_port = ocelot_mrp_find_partner_port(ocelot, ocelot_port);
if (dst_port == -1)
if (err)
return err;
- err = ocelot_mrp_copy_add_vcap(ocelot, port, 2,
- port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_add(ocelot, port);
if (err) {
- ocelot_mrp_del_vcap(ocelot, port);
+ ocelot_mrp_del_vcap(ocelot,
+ OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
return err;
}
const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- int i;
+ int err, i;
if (!ocelot_port)
return -EOPNOTSUPP;
if (ocelot_port->mrp_ring_id != mrp->ring_id)
return 0;
- ocelot_mrp_del_vcap(ocelot, port);
- ocelot_mrp_del_vcap(ocelot, port + ocelot->num_phys_ports);
+ err = ocelot_mrp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ ocelot_mrp_del_vcap(ocelot, OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port));
for (i = 0; i < ocelot->num_phys_ports; ++i) {
ocelot_port = ocelot->ports[i];
}
INIT_LIST_HEAD(&ocelot->dummy_rules);
+ INIT_LIST_HEAD(&ocelot->traps);
INIT_LIST_HEAD(&ocelot->vcap_pol.pol_list);
return 0;
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
+ u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
- u16 nfp_mac_idx;
-
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
- u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ else
+ nfp_mac_idx = entry->index;
+
/* If MAC has global ID then extract and free the ida entry. */
- if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
port->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
}
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
- bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
priv->ndev->name, vlan->vid, vlan->flags);
- if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
- return 0;
-
return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(&rt->dst);
return -EMSGSIZE;
}
}
skb->protocol = eth_type_trans(skb, geneve->dev);
- netif_rx(skb);
+ __netif_rx(skb);
dst_release(dst);
return -EMSGSIZE;
}
dev_sw_netstats_rx_add(pctx->dev, skb->len);
- netif_rx(skb);
+ __netif_rx(skb);
return 0;
err:
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
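+ /* Editor's note - worked arithmetic for the fix above, using the
+  * 2.4 GHz O-QPSK symbol_duration of 16 us set a few lines earlier:
+  *   lifs_period = 40 symbols * 16 us/symbol = 640 us
+  *   sifs_period = 12 symbols * 16 us/symbol = 192 us
+  * The previous values (40 and 12) were raw symbol counts, not times.
+  */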
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
skb_orphan(skb);
- /* Before queueing this packet to netif_rx(),
+ /* Before queueing this packet to __netif_rx(),
* make sure dst is refcounted.
*/
skb_dst_force(skb);
skb->protocol = eth_type_trans(skb, dev);
len = skb->len;
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
dev_lstats_add(dev, len);
return NETDEV_TX_OK;
else
nskb->pkt_type = PACKET_MULTICAST;
- netif_rx(nskb);
+ __netif_rx(nskb);
}
continue;
}
nskb->dev = ndev;
- if (netif_rx(nskb) == NET_RX_SUCCESS) {
+ if (__netif_rx(nskb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUntagged++;
u64_stats_update_end(&secy_stats->syncp);
macsec_reset_skb(nskb, macsec->secy.netdev);
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
if (ret == NET_RX_SUCCESS) {
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsUnknownSCI++;
if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, dev->dev_addr))
nskb->pkt_type = PACKET_HOST;
- ret = netif_rx(nskb);
+ ret = __netif_rx(nskb);
macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, false);
}
/* forward to original port. */
vlan = src;
ret = macvlan_broadcast_one(skb, vlan, eth, 0) ?:
- netif_rx(skb);
+ __netif_rx(skb);
handle_res = RX_HANDLER_CONSUMED;
goto out;
}
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {
u64_stats_inc(&mhi_netdev->stats.rx_packets);
u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
- netif_rx(skb);
+ __netif_rx(skb);
}
/* Refill if RX buffers queue becomes low */
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (netif_rx(skb) == NET_RX_DROP) {
+ if (__netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
else
mscr = 0;
- return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1121_PHY_MSCR_REG,
- MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+ return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
+ changed = err;
+
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- changed = err;
+ changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
{
int err;
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- err = genphy_config_aneg(phydev);
- return 0;
+ return genphy_soft_reset(phydev);
}
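
phy_modify_paged_changed() differs from phy_modify_paged() only in its return value: 0 when the register already held the requested bits, 1 when a write actually changed them, negative on error. That lets config_aneg accumulate a "changed" flag, as the hunks above do, and restart autonegotiation only when needed. A minimal sketch of the pattern (the function is hypothetical; the phylib helpers are assumed):

static int example_config_aneg(struct phy_device *phydev)
{
	int changed = 0;
	int err;

	err = phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
				       MII_88E1121_PHY_MSCR_REG,
				       MII_88E1121_PHY_MSCR_DELAY_MASK, 0);
	if (err < 0)
		return err;
	changed |= err;		/* 1 iff the MSCR bits were modified */

	err = genphy_config_aneg(phydev);
	if (err < 0)
		return err;

	/* only force a renegotiation if something actually changed */
	return changed ? genphy_restart_aneg(phydev) : 0;
}
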
static int m88e1118_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
rnet->rx_skb[i]->protocol =
eth_type_trans(rnet->rx_skb[i], ndev);
- error = netif_rx(rnet->rx_skb[i]);
+ error = __netif_rx(rnet->rx_skb[i]);
if (error == NET_RX_DROP) {
ndev->stats.rx_dropped++;
/* datagram completed: send to upper level */
skb_trim(skb, dlen);
- netif_rx(skb);
+ __netif_rx(skb);
stats->rx_bytes+=dlen;
stats->rx_packets++;
lp->rx_skb[ns] = NULL;
u16 hdr_off;
u32 *pkt_hdr;
- /* This check is no longer done by usbnet */
- if (skb->len < dev->net->hard_header_len)
+ /* At the end of the SKB, there's a header telling us how many packets
+ * are bundled into this buffer and where we can find an array of
+ * per-packet metadata (which contains elements encoded into u16).
+ */
+ if (skb->len < 4)
return 0;
-
skb_trim(skb, skb->len - 4);
rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
+
+ if (pkt_cnt == 0)
+ return 0;
+
+ /* Make sure that the bounds of the metadata array are inside the SKB
+ * (and in front of the counter at the end).
+ */
+ if (pkt_cnt * 2 + hdr_off > skb->len)
+ return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
- while (pkt_cnt--) {
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, hdr_off);
+
+ for (; ; pkt_cnt--, pkt_hdr++) {
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
- /* Check CRC or runt packet */
- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
- continue;
- }
-
- if (pkt_cnt == 0) {
- skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(skb, 2);
- skb_set_tail_pointer(skb, skb->len);
- skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(skb, pkt_hdr);
- return 1;
- }
+ if (pkt_len > skb->len)
+ return 0;
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
+ /* Check CRC or runt packet */
+ if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+ pkt_len >= 2 + ETH_HLEN) {
+ bool last = (pkt_cnt == 0);
+
+ if (last) {
+ ax_skb = skb;
+ } else {
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
+ return 0;
+ }
ax_skb->len = pkt_len;
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
+
+ if (last)
+ return 1;
+
usbnet_skb_return(dev, ax_skb);
- } else {
- return 0;
}
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ /* Trim this packet away from the SKB */
+ if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ return 0;
}
- return 1;
}
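
The buffer ax88179_rx_fixup() receives bundles several Ethernet frames followed by metadata; a sketch of the layout as reconstructed from the code above (field names are descriptive, not taken from a datasheet):

/* |<- pkt 0 + pad ->|<- pkt 1 + pad ->| ... | u32 pkt_hdr[pkt_cnt] | u32 rx_hdr |
 *
 * rx_hdr (last 4 bytes): low 16 bits = pkt_cnt, high 16 bits = hdr_off
 *                        (offset of the pkt_hdr[] metadata array).
 * pkt_hdr[i]: little-endian; bits 16..28 carry the packet length, low
 *             bits carry AX_RXHDR_CRC_ERR / AX_RXHDR_DROP_ERR flags.
 * Each packet is padded to 8 bytes ((pkt_len + 7) & ~7) and starts with
 * a 2-byte IP-alignment pseudo header that the driver pulls off.
 */
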
static struct sk_buff *
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_MASTER_INTERFACE,
.driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+ /* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
{
struct hso_device *hso_dev;
- hso_dev = kzalloc(sizeof(*hso_dev), GFP_ATOMIC);
+ hso_dev = kzalloc(sizeof(*hso_dev), GFP_KERNEL);
if (!hso_dev)
return NULL;
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
.idProduct = 0x9032, /* SL-6000 */
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
{
return __dev_forward_skb(dev, skb) ?: xdp ?
veth_xdp_rx(rq, skb) :
- netif_rx(skb);
+ __netif_rx(skb);
}
/* return true if the specified skb has chances of GRO aggregation
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
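
Both veth hunks above replace the bare "set flag, napi_schedule()" pair with napi_schedule_prep()/__napi_schedule(): only the caller that wins NAPI ownership may set rx_notify_masked, which closes a race where two producers could both observe the flag clear. A minimal sketch of the pattern, with a hypothetical example_rq standing in for veth's per-queue struct:

struct example_rq {
	struct napi_struct napi;
	bool masked;		/* mirrors rx_notify_masked */
};

static void example_kick_napi(struct example_rq *rq)
{
	/* napi_schedule_prep() returns true for exactly one caller, so
	 * the flag is set only by the new NAPI owner.
	 */
	if (!READ_ONCE(rq->masked) && napi_schedule_prep(&rq->napi)) {
		WRITE_ONCE(rq->masked, true);
		__napi_schedule(&rq->napi);
	}
}
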
NULL, virtnet_cpu_dead);
if (ret)
goto err_dead;
-
- ret = register_virtio_driver(&virtio_net_driver);
+ ret = register_virtio_driver(&virtio_net_driver);
if (ret)
goto err_virtio;
return 0;
skb->protocol = eth_type_trans(skb, dev);
- if (likely(netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
vrf_rx_stats(dev, len);
else
this_cpu_inc(dev->dstats->rx_drps);
tx_stats->tx_bytes += len;
u64_stats_update_end(&tx_stats->syncp);
- if (netif_rx(skb) == NET_RX_SUCCESS) {
+ if (__netif_rx(skb) == NET_RX_SUCCESS) {
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += len;
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_sdio_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 12,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca988x_ops,
.decap_align_bytes = 4,
.spectral_bin_discard = 0,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
+ .rx_desc_ops = &qca988x_rx_desc_ops,
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
.hw_ops = &qca99x0_ops,
.decap_align_bytes = 1,
.spectral_bin_discard = 4,
.dir = WCN3990_HW_1_0_FW_DIR,
},
.sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_TLV_NUM_PEERS,
HTT_T2H_MSG_TYPE_PEER_STATS,
};
+const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload)
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return !!(rx_desc->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
+const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
+
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+};
+
+static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off)
+{
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static struct htt_rx_desc *
+ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff)
+{
+ return &((struct htt_rx_desc_v2 *)buff)->base;
+}
+
+static struct rx_attention *
+ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->attention;
+}
+
+static struct rx_frag_info_common *
+ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->frag_info.common;
+}
+
+static struct rx_mpdu_start *
+ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_start;
+}
+
+static struct rx_mpdu_end *
+ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_end;
+}
+
+static struct rx_msdu_start_common *
+ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_start.common;
+}
+
+static struct rx_msdu_end_common *
+ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_end.common;
+}
+
+static struct rx_ppdu_start *
+ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_start;
+}
+
+static struct rx_ppdu_end_common *
+ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_end.common;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->rx_hdr_status;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->msdu_payload;
+}
+
+const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v2),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload),
+
+ .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer,
+ .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets,
+ .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention,
+ .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info,
+ .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start,
+ .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end,
+ .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start,
+ .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end,
+ .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start,
+ .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end,
+ .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status,
+ .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload,
+};
+
int ath10k_htt_connect(struct ath10k_htt *htt)
{
struct ath10k_htc_svc_conn_req conn_req;
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
-struct htt_rx_ring_setup_ring32 {
- __le32 fw_idx_shadow_reg_paddr;
- __le32 rx_ring_base_paddr;
- __le16 rx_ring_len; /* in 4-byte words */
- __le16 rx_ring_bufsize; /* rx skb size - in bytes */
- __le16 flags; /* %HTT_RX_RING_FLAGS_ */
- __le16 fw_idx_init_val;
-
+struct htt_rx_ring_rx_desc_offsets {
/* the following offsets are in 4-byte units */
__le16 mac80211_hdr_offset;
__le16 msdu_payload_offset;
__le16 frag_info_offset;
} __packed;
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
struct htt_rx_ring_setup_ring64 {
__le64 fw_idx_shadow_reg_paddr;
__le64 rx_ring_base_paddr;
__le16 flags; /* %HTT_RX_RING_FLAGS_ */
__le16 fw_idx_init_val;
- /* the following offsets are in 4-byte units */
- __le16 mac80211_hdr_offset;
- __le16 msdu_payload_offset;
- __le16 ppdu_start_offset;
- __le16 ppdu_end_offset;
- __le16 mpdu_start_offset;
- __le16 mpdu_end_offset;
- __le16 msdu_start_offset;
- __le16 msdu_end_offset;
- __le16 rx_attention_offset;
- __le16 frag_info_offset;
+ struct htt_rx_ring_rx_desc_offsets offsets;
} __packed;
struct htt_rx_ring_setup_hdr {
return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
}
+/* The driver strongly assumes that the rx header status is 64 bytes long,
+ * so all possible rx_desc structures must respect this assumption.
+ */
#define RX_HTT_HDR_STATUS_LEN 64
-/* This structure layout is programmed via rx ring setup
+/* The rx descriptor structure layout is programmed via rx ring setup
* so that FW knows how to transfer the rx descriptor to the host.
- * Buffers like this are placed on the rx ring.
+ * Unfortunately, QCA6174's firmware does not currently behave correctly when
+ * the rx descriptor layout is modified beyond what it expects (even if the
+ * new layout is correctly programmed during the rx ring setup).
+ * Therefore we must keep two different memory layouts, abstract the rx
+ * descriptor representation, and use ath10k_htt_rx_desc_ops to access rx
+ * descriptor data correctly.
*/
+
+/* base struct used for abstracting the rx descriptor representation */
struct htt_rx_desc {
union {
/* This field is filled on the host using the msdu buffer
struct fw_rx_desc_base fw_desc;
u32 pad;
} __packed;
+} __packed;
+
+/* rx descriptor for wcn3990, possibly extensible to newer cards.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v2 {
+ struct htt_rx_desc base;
struct {
struct rx_attention attention;
struct rx_frag_info frag_info;
u8 msdu_payload[];
};
+/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0, to make sure
+ * their firmware works correctly. We keep a single rx descriptor for all
+ * three families of cards because testing showed it to be the most stable
+ * solution; e.g. a descriptor dedicated only to QCA6174 occasionally caused
+ * firmware crashes during testing.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v1 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info_v1 frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start_v1 msdu_start;
+ struct rx_msdu_end_v1 msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end_v1 ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* rx_desc abstraction */
+struct ath10k_htt_rx_desc_ops {
+ /* These fields are mandatory; they must be specified in any instance */
+
+ /* sizeof() of the rx_desc structure used by this hw */
+ size_t rx_desc_size;
+
+ /* offset of msdu_payload inside the rx_desc structure used by this hw */
+ size_t rx_desc_msdu_payload_offset;
+
+ /* These fields are optional.
+ * When a field is not provided, the default implementation is used
+ * (see the ath10k_htt_rx_desc_* helpers below for the defaults)
+ */
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+
+ /* Safely cast from a void* buffer containing an rx descriptor
+ * to the proper rx_desc structure
+ */
+ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
+
+ void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
+ struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
+ struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
+ struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
+ struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
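As an illustration of how such an instance can be filled in, a v1-based card
that is happy with all the default accessors only needs the two mandatory
fields (a minimal sketch; the actual in-tree definitions may differ):

const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
	/* mandatory fields; every optional getter left NULL falls back
	 * to the htt_rx_desc_v1 defaults implemented below
	 */
	.rx_desc_size = sizeof(struct htt_rx_desc_v1),
	.rx_desc_msdu_payload_offset =
		offsetof(struct htt_rx_desc_v1, msdu_payload),
};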
+
+static inline int
+ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
+ return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+static inline bool
+ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
+ return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
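A family that needs a non-default answer wires in the optional op instead.
For example, the qca99x0 l3_pad_bytes getter (removed from hw.c later in
this series) would look roughly like this under the new abstraction, adapted
to the _v1 types (a sketch, not necessarily the exact in-tree code):

static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
{
	struct htt_rx_desc_v1 *rx_desc =
		container_of(rxd, struct htt_rx_desc_v1, base);

	return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
		  RX_MSDU_END_INFO1_L3_HDR_PAD);
}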
+
+/* The default implementation of all these getters uses the old (v1) rx_desc,
+ * so that it is easier to define the ath10k_htt_rx_desc_ops instances for
+ * the existing cards. But, if new wireless cards must be supported, it would
+ * probably be better to switch the default implementation to the new rx_desc,
+ * since this would make extending it easier.
+ */
+static inline struct htt_rx_desc *
+ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
+{
+ if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
+ return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
+ return &((struct htt_rx_desc_v1 *)buff)->base;
+}
+
+static inline void
+ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_rx_desc_offsets *off)
+{
+ if (hw->rx_desc_ops->rx_desc_get_offsets) {
+ hw->rx_desc_ops->rx_desc_get_offsets(off);
+ } else {
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+ }
+}
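Note the division by 4 in desc_offset(): the HTT ring setup interface expects
these offsets in 4-byte units, as the old htt_rx_ring_setup_ring32 layout
documented. As a made-up worked example, a field sitting at byte offset 0x60
inside htt_rx_desc_v1 would be programmed as 0x60 / 4 = 0x18.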
+
+static inline struct rx_attention *
+ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_attention)
+ return hw->rx_desc_ops->rx_desc_get_attention(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->attention;
+}
+
+static inline struct rx_frag_info_common *
+ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_frag_info)
+ return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->frag_info.common;
+}
+
+static inline struct rx_mpdu_start *
+ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_start;
+}
+
+static inline struct rx_mpdu_end *
+ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_end;
+}
+
+static inline struct rx_msdu_start_common *
+ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_start)
+ return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_start.common;
+}
+
+static inline struct rx_msdu_end_common *
+ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_end)
+ return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_end.common;
+}
+
+static inline struct rx_ppdu_start *
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_start;
+}
+
+static inline struct rx_ppdu_end_common *
+ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_end.common;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
+ return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->rx_hdr_status;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
+ return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->msdu_payload;
+}
+
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
* rounded up to a cache line size.
*/
#define HTT_RX_BUF_SIZE 2048
-#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* HTT_RX_MSDU_SIZE can no longer be computed statically,
+ * because it depends on the underlying device's rx_desc representation.
+ */
+static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
+{
+ return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
+}
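Concretely, assuming each ops instance sets rx_desc_size with sizeof() of its
layout as prescribed above, a v1 card now gets
HTT_RX_BUF_SIZE - sizeof(struct htt_rx_desc_v1) bytes of MSDU room per
buffer, while wcn3990 gets HTT_RX_BUF_SIZE - sizeof(struct htt_rx_desc_v2).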
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely.
#define HTT_RX_RING_REFILL_RESCHED_MS 5
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+/* shortcut to interpret a raw memory buffer as a rx descriptor */
+#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
+ struct ath10k_hw_params *hw = &htt->ar->hw_params;
struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
- rx_desc = (struct htt_rx_desc *)skb->data;
- rx_desc->attention.flags = __cpu_to_le32(0);
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
+ ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
paddr = dma_map_single(htt->ar->dev, skb->data,
skb->len + skb_tailroom(skb),
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
+ struct rx_attention *rx_desc_attention;
+ struct rx_frag_info_common *rx_desc_frag_info_common;
+ struct rx_msdu_start_common *rx_desc_msdu_start_common;
+ struct rx_msdu_end_common *rx_desc_msdu_end_common;
lockdep_assert_held(&htt->rx_ring.lock);
__skb_queue_tail(amsdu, msdu);
- rx_desc = (struct htt_rx_desc *)msdu->data;
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
+ rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
+ rx_desc);
+ rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
+ rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
/* FIXME: we must report msdu payload since this is what caller
* expects now
*/
- skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
- skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/*
* Sanity check - confirm the HW is finished filling in the
* To prevent the case that we handle a stale Rx descriptor,
* just assert for now until we have a way to recover.
*/
- if (!(__le32_to_cpu(rx_desc->attention.flags)
+ if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu);
return -EIO;
}
- msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
- msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+ msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
- msdu_chained = rx_desc->frag_info.ring2_more_count;
+ msdu_chained = rx_desc_frag_info_common->ring2_more_count;
if (msdu_len_invalid)
msdu_len = 0;
skb_trim(msdu, 0);
- skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
msdu_len -= msdu->len;
/* Note: Chained buffers do not contain rx descriptor */
msdu_chaining = 1;
}
- last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+ last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
- trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
- sizeof(*rx_desc) - sizeof(u32));
+ /* FIXME: why are we skipping the first part of the rx_desc? */
+ trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
+ hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)
break;
struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u32 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
u64 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
- rxd = (void *)msdu->data;
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(struct htt_rx_desc));
- skb_pull(msdu, sizeof(struct htt_rx_desc));
- skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
__skb_queue_tail(list, msdu);
if (!is_offload) {
- rxd = (void *)msdu->data;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
- trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
- skb_put(msdu, sizeof(*rxd));
- skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
- if (!(__le32_to_cpu(rxd->attention.flags) &
+ if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct rx_mpdu_end *rxd_mpdu_end;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_ppdu_start *rxd_ppdu_start;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
+ u8 *rxd_msdu_payload;
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
u32 stbc, nsts_su;
- info1 = __le32_to_cpu(rxd->ppdu_start.info1);
- info2 = __le32_to_cpu(rxd->ppdu_start.info2);
- info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
+
+ info1 = __le32_to_cpu(rxd_ppdu_start->info1);
+ info2 = __le32_to_cpu(rxd_ppdu_start->info2);
+ info3 = __le32_to_cpu(rxd_ppdu_start->info3);
preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
- __le32_to_cpu(rxd->attention.flags),
- __le32_to_cpu(rxd->mpdu_start.info0),
- __le32_to_cpu(rxd->mpdu_start.info1),
- __le32_to_cpu(rxd->msdu_start.common.info0),
- __le32_to_cpu(rxd->msdu_start.common.info1),
- rxd->ppdu_start.info0,
- __le32_to_cpu(rxd->ppdu_start.info1),
- __le32_to_cpu(rxd->ppdu_start.info2),
- __le32_to_cpu(rxd->ppdu_start.info3),
- __le32_to_cpu(rxd->ppdu_start.info4));
+ __le32_to_cpu(rxd_attention->flags),
+ __le32_to_cpu(rxd_mpdu_start->info0),
+ __le32_to_cpu(rxd_mpdu_start->info1),
+ __le32_to_cpu(rxd_msdu_start_common->info0),
+ __le32_to_cpu(rxd_msdu_start_common->info1),
+ rxd_ppdu_start->info0,
+ __le32_to_cpu(rxd_ppdu_start->info1),
+ __le32_to_cpu(rxd_ppdu_start->info2),
+ __le32_to_cpu(rxd_ppdu_start->info3),
+ __le32_to_cpu(rxd_ppdu_start->info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
- __le32_to_cpu(rxd->msdu_end.common.info0),
- __le32_to_cpu(rxd->mpdu_end.info0));
+ __le32_to_cpu(rxd_msdu_end_common->info0),
+ __le32_to_cpu(rxd_mpdu_end->info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
- rxd->msdu_payload, 50);
+ rxd_msdu_payload, 50);
}
status->rate_idx = mcs;
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
if (!rxd)
return NULL;
- if (rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ if (rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
- if (!(rxd->msdu_end.common.info0 &
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
- peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
int i;
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
- if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+ rxd_ppdu_start->rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
- rxd->ppdu_start.rssi_comb;
+ rxd_ppdu_start->rssi_comb;
status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_end_common *rxd_ppdu_end_common;
+
+ rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
+
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF. Is it worth holding frames until end of PPDU is known?
*
* FIXME: Can we get/compute 64bit TSF?
*/
- status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
u32 vdev_id)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
bool is_first_ppdu;
bool is_last_ppdu;
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_first_ppdu = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ is_first_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
- is_last_ppdu = !!(rxd->attention.flags &
+ is_last_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
if (is_first_ppdu) {
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
size_t hdr_len;
size_t crypto_len;
bool is_first;
int bytes_aligned = ar->hw_params.decap_align_bytes;
u8 *qos;
- rxd = (void *)msdu->data - sizeof(*rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
* error packets. If limit exceeds, hw sends all remaining MSDUs as
* a single last MSDU with this msdu limit error set.
*/
- msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd);
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If MSDU limit error happens, then don't warn on, the partial raw MSDU
* without first MSDU is expected in that case, and handled later here.
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
*/
/* pull decapped header and copy SA & DA */
- rxd = (void *)msdu->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ u8 *rxd_rx_hdr_status;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
int bytes_aligned = ar->hw_params.decap_align_bytes;
- rxd = (void *)msdu->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ hdr = (void *)rxd_rx_hdr_status;
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
if (WARN_ON_ONCE(!rfc1042))
return;
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
* [payload]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted)
{
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
enum rx_msdu_decap_format decap;
/* First msdu's decapped header:
* [rfc1042/llc]
*/
- rxd = (void *)msdu->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
}
}
-static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
- rxd = (void *)skb->data - sizeof(*rxd);
- flags = __le32_to_cpu(rxd->attention.flags);
- info = __le32_to_cpu(rxd->msdu_start.common.info1);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ flags = __le32_to_cpu(rxd_attention->flags);
+ info = __le32_to_cpu(rxd_msdu_start_common->info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
return CHECKSUM_UNNECESSARY;
}
-static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
+ struct sk_buff *msdu)
{
- msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu, *temp;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
return;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
- is_mgmt = !!(rxd->attention.flags &
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ is_mgmt = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
* decapped header. It'll be used for undecapping of each MSDU.
*/
- hdr = (void *)rxd->rx_hdr_status;
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
- rxd = (void *)last->data - sizeof(*rxd);
- attention = __le32_to_cpu(rxd->attention.flags);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)last->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ attention = __le32_to_cpu(rxd_attention->flags);
has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
continue;
}
- ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
unsigned long *unchain_cnt)
{
struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_frag_info_common *rxd_frag_info;
enum rx_msdu_decap_format decap;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
/* FIXME: Current unchaining logic can only handle simple case of raw
* try re-constructing such frames - it'll be pretty much garbage.
*/
if (decap != RX_MSDU_DECAP_RAW ||
- skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
return;
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
first = skb_peek(amsdu);
- rxd = (void *)first->data - sizeof(*rxd);
- hdr = (void *)rxd->rx_hdr_status;
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
- is_first = !!(rxd->msdu_end.common.info0 &
+ is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
- is_last = !!(rxd->msdu_end.common.info0 &
+ is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
if (!is_first)
return false;
- enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
spin_unlock_bh(&ar->data_lock);
}
-static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
+ struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
if (skb_queue_empty(list))
return -ENOBUFS;
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
msdu = skb_peek_tail(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
- if (!(rxd->msdu_end.common.info0 &
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
return 0;
}
-static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring32 *ring =
(struct htt_rx_ring_setup_ring32 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
-static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw, void *rx_ring)
{
struct htt_rx_ring_setup_ring64 *ring =
(struct htt_rx_ring_setup_ring64 *)rx_ring;
-#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
- ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
- ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
- ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
- ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
- ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
- ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
- ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
- ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
- ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
- ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
-#undef desc_offset
+ ath10k_htt_rx_desc_get_offsets(hw, &ring->offsets);
}
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_32(ring);
+ ath10k_htt_fill_rx_desc_offset_32(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring64 *ring;
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
- ath10k_htt_fill_rx_desc_offset_64(ring);
+ ath10k_htt_fill_rx_desc_offset_64(hw, ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
#include "hif.h"
#include "wmi-ops.h"
#include "bmi.h"
+#include "rx_desc.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_soc_base_address = 0x00004000,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
-static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
-{
- return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
- RX_MSDU_END_INFO1_L3_HDR_PAD);
-}
-
-static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
-{
- return !!(rxd->msdu_end.common.info0 &
- __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
-}
-
const struct ath10k_hw_ops qca99x0_ops = {
- .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
- .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
.is_rssi_enable = ath10k_htt_tx_rssi_enable,
};
u32 outdiv;
};
+struct ath10k_htt_rx_desc_ops;
+
struct ath10k_hw_params {
u32 id;
u16 dev_id;
*/
bool sw_decrypt_mcast_mgmt;
+ /* Rx descriptor abstraction */
+ const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
const struct ath10k_hw_ops *hw_ops;
/* Number of bytes used for alignment in rx_hdr_status of rx desc. */
bool dynamic_sar_support;
};
-struct htt_rx_desc;
struct htt_resp;
struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
/* Defines needed for Rx descriptor abstraction */
struct ath10k_hw_ops {
- int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
void (*set_coverage_class)(struct ath10k *ar, s16 value);
int (*enable_pll_clk)(struct ath10k *ar);
- bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
int (*is_rssi_enable)(struct htt_resp *resp);
};
extern const struct ath10k_hw_clk_params qca6174_clk[];
-static inline int
-ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
- return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
- return 0;
-}
-
-static inline bool
-ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw,
- struct htt_rx_desc *rxd)
-{
- if (hw->hw_ops->rx_desc_get_msdu_limit_error)
- return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd);
- return false;
-}
-
static inline int
ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
struct htt_resp *htt)
* descriptor.
*/
-struct rx_frag_info {
+struct rx_frag_info_common {
u8 ring0_more_count;
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
+} __packed;
+
+struct rx_frag_info_wcn3990 {
u8 ring4_more_count;
u8 ring5_more_count;
u8 ring6_more_count;
u8 ring7_more_count;
} __packed;
+struct rx_frag_info {
+ struct rx_frag_info_common common;
+ union {
+ struct rx_frag_info_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_frag_info_v1 {
+ struct rx_frag_info_common common;
+} __packed;
+
/*
* ring0_more_count
* Indicates the number of more buffers associated with RX DMA
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
- struct rx_msdu_start_qca99x0 qca99x0;
struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_start_v1 {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
* msdu_length
* MSDU length in bytes after decapsulation. This field is
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
- struct rx_msdu_end_qca99x0 qca99x0;
struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
+struct rx_msdu_end_v1 {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
/*
*ip_hdr_chksum
* This can include the IP header checksum or the pseudo header
} __packed;
struct rx_ppdu_end {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_ppdu_end_v1 {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
- struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
}
for (i = 0; i < CE_COUNT; i++) {
- res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
- if (!res) {
- ath10k_err(ar, "failed to get IRQ%d\n", i);
- ret = -ENODEV;
- goto out;
- }
- ar_snoc->ce_irqs[i].irq_line = res->start;
+ ret = platform_get_irq(ar_snoc->dev, i);
+ if (ret < 0)
+ return ret;
+ ar_snoc->ce_irqs[i].irq_line = ret;
}
ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
ar_snoc->xo_cal_data);
}
- ret = 0;
-out:
- return ret;
+ return 0;
}
static void ath10k_snoc_quirks_init(struct ath10k *ar)
tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
tx_done->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status(htt->ar->hw, msdu);
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
param->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
ieee80211_tx_status_irqsafe(ar->hw, msdu);
if (patterns[i].mask[j / 8] & BIT(j % 8))
bitmask[j] = 0xff;
old_pattern.mask = bitmask;
- new_pattern = old_pattern;
if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
- if (patterns[i].pkt_offset < ETH_HLEN)
+ if (patterns[i].pkt_offset < ETH_HLEN) {
ath10k_wow_convert_8023_to_80211(&new_pattern,
&old_pattern);
- else
+ } else {
+ new_pattern = old_pattern;
new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
}
if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
},
{
.name = "qca6390 hw2.0",
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
},
{
.name = "qcn9074 hw1.0",
.wakeup_mhi = false,
.supports_rssi_stats = false,
.fw_wmi_diag_event = false,
+ .current_cc_support = false,
},
{
.name = "wcn6855 hw2.0",
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
},
{
.name = "wcn6855 hw2.1",
.wakeup_mhi = true,
.supports_rssi_stats = true,
.fw_wmi_diag_event = true,
+ .current_cc_support = true,
},
};
void ath11k_core_free(struct ath11k_base *ab)
{
+ flush_workqueue(ab->workqueue);
+ destroy_workqueue(ab->workqueue);
+
kfree(ab);
}
EXPORT_SYMBOL(ath11k_core_free);
struct completion finish_11d_ch_list;
bool pending_11d;
bool regdom_set_by_user;
+ int hw_rate_code;
};
struct ath11k_band_cap {
goto exit;
}
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
if (enable) {
rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
+static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
static inline
u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
struct hal_rx_desc *desc)
u8 bw;
u8 rate_mcs, nss;
u8 sgi;
- bool is_cck;
+ bool is_cck, is_ldpc;
pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
if (sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
break;
case RX_MSDU_START_PKT_TYPE_11AX:
rx_status->rate_idx = rate_mcs;
return num_buffs_reaped;
}
-int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- enum hal_rx_mon_status hal_status;
- struct sk_buff *skb;
- struct sk_buff_head skb_list;
- struct hal_rx_mon_ppdu_info ppdu_info;
- struct ath11k_peer *peer;
- struct ath11k_sta *arsta;
- int num_buffs_reaped = 0;
- u32 rx_buf_sz;
- u16 log_type = 0;
-
- __skb_queue_head_init(&skb_list);
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
- &skb_list);
- if (!num_buffs_reaped)
- goto exit;
-
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
-
- while ((skb = __skb_dequeue(&skb_list))) {
- if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
- rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
- } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
- log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
- rx_buf_sz = DP_RX_BUFFER_SIZE;
- }
-
- if (log_type)
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
- hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
-
- if (ppdu_info.peer_id == HAL_INVALID_PEERID ||
- hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- rcu_read_lock();
- spin_lock_bh(&ab->base_lock);
- peer = ath11k_peer_find_by_id(ab, ppdu_info.peer_id);
-
- if (!peer || !peer->sta) {
- ath11k_dbg(ab, ATH11K_DBG_DATA,
- "failed to find the peer with peer_id %d\n",
- ppdu_info.peer_id);
- goto next_skb;
- }
-
- arsta = (struct ath11k_sta *)peer->sta->drv_priv;
- ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
-
- if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
-
-next_skb:
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
-
- dev_kfree_skb_any(skb);
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
- }
-exit:
- return num_buffs_reaped;
-}
-
static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
{
struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
}
}
-static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- int mac_id, u32 quota,
- struct napi_struct *napi)
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
{
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- struct hal_rx_mon_ppdu_info *ppdu_info;
- struct sk_buff *status_skb;
- u32 tlv_status = HAL_TLV_STATUS_BUF_DONE;
- struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type = 0;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
- ppdu_info = &pmon->mon_ppdu_info;
- rx_mon_stats = &pmon->rx_mon_stats;
+ __skb_queue_head_init(&skb_list);
- if (pmon->mon_ppdu_status != DP_PPDU_STATUS_START)
- return;
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
- while (!skb_queue_empty(&pmon->rx_status_q)) {
- status_skb = skb_dequeue(&pmon->rx_status_q);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
- tlv_status = ath11k_hal_rx_parse_mon_status(ar->ab, ppdu_info,
- status_skb);
- if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ }
+
+ if (log_type)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
- dev_kfree_skb_any(status_skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
}
+exit:
+ return num_buffs_reaped;
}
static u32
return quota;
}
-static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
- struct napi_struct *napi, int budget)
-{
- struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
- int num_buffs_reaped = 0;
-
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
- &pmon->rx_status_q);
- if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
-
- return num_buffs_reaped;
-}
-
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
ab->hw_params.full_monitor_mode)
ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
- else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
- ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
}
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
ts->ack_rssi;
- info->status.is_valid_ack_signal = true;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
HAL_RX_RECEPTION_TYPE_MAX,
};
-#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
-#define HAL_TLV_STATUS_PPDU_DONE 1
-#define HAL_TLV_STATUS_BUF_DONE 2
-#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
#define HAL_RX_FCS_LEN 4
enum hal_rx_mon_status {
HAL_RX_MON_STATUS_BUF_DONE,
};
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
struct hal_sw_mon_ring_entries {
dma_addr_t mon_dst_paddr;
dma_addr_t mon_status_paddr;
__le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
}
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
__le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
}
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
.rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
.rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
.rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
.rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
.rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
.rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
bool wakeup_mhi;
bool supports_rssi_stats;
bool fw_wmi_diag_event;
+ bool current_cc_support;
};
struct ath11k_hw_ops {
u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
return;
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->bw_80 = true;
if (ret)
ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that we can reconfigure it once vdev starts.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
hw_rate_code);
sta->addr, arvif->vdev_id);
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
- ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION;
- if (ar->ab->hw_params.vdev_start_delay &&
- vif->type == NL80211_IFTYPE_STATION)
- goto free;
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
- ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
- if (ret)
- ath11k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
- else
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
- sta->addr, arvif->vdev_id);
+ if (!skip_peer_delete) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab,
+ ATH11K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ }
ath11k_mac_dec_num_stations(arvif, sta);
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
- if (peer && peer->sta == sta) {
+ if (skip_peer_delete && peer) {
+ peer->sta = NULL;
+ } else if (peer && peer->sta == sta) {
ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
vif->addr, arvif->vdev_id);
peer->sta = NULL;
}
spin_unlock_bh(&ar->ab->base_lock);
-free:
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
return ret;
}
+ /* Reconfigure hardware rate code since it is cleared by firmware. */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
return -ENOENT;
ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
if (ret)
return ret;
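Both of_node_put() additions in this area plug device-node refcount leaks: any of_* lookup that returns a node takes a reference, and that reference must be dropped once the node is no longer needed. A minimal sketch of the balanced pattern, assuming a hypothetical phandle lookup:

#include <linux/of.h>
#include <linux/of_address.h>

/* Balanced get/put around a one-shot use of a device node (sketch). */
static int read_reg_resource(struct device_node *parent, struct resource *res)
{
	struct device_node *np;
	int ret;

	np = of_parse_phandle(parent, "memory-region", 0); /* takes a reference */
	if (!np)
		return -ENOENT;

	ret = of_address_to_resource(np, 0, res);
	of_node_put(np); /* drop the reference as soon as np is no longer used */
	return ret;
}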
{
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
- int ret;
+ int ret, fbret;
lockdep_assert_held(&ar->conf_mutex);
ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
param->peer_addr, param->vdev_id);
- reinit_completion(&ar->peer_delete_done);
-
- ret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
- param->vdev_id);
- if (ret) {
- ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
- param->vdev_id, param->peer_addr);
- return ret;
- }
-
- ret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
- param->peer_addr);
- if (ret)
- return ret;
-
- return -ENOENT;
+ ret = -ENOENT;
+ goto cleanup;
}
peer->pdev_idx = ar->pdev_idx;
spin_unlock_bh(&ar->ab->base_lock);
return 0;
+
+cleanup:
+ reinit_completion(&ar->peer_delete_done);
+
+ fbret = ath11k_wmi_send_peer_delete_cmd(ar, param->peer_addr,
+ param->vdev_id);
+ if (fbret) {
+ ath11k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ param->vdev_id, param->peer_addr);
+ goto exit;
+ }
+
+ fbret = ath11k_wait_for_peer_delete_done(ar, param->vdev_id,
+ param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed wait for peer %pM delete done id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+exit:
+ return ret;
}
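The refactor above funnels the failure path through a single cleanup label: the primary error code (ret) is preserved, while the best-effort peer-delete fallback only logs its own result (fbret). A minimal standalone sketch of the idiom, with hypothetical names:

#include <stdio.h>

struct ctx { int created; int found; };

static int do_create(struct ctx *c)  { c->created = 1; return 0; }
static int lookup_ok(struct ctx *c)  { return c->found; }
static int do_destroy(struct ctx *c) { c->created = 0; return 0; }

/* Report the primary error; rollback failures are only logged. */
static int create_object(struct ctx *c)
{
	int ret, fbret;

	ret = do_create(c);
	if (ret)
		return ret;

	if (!lookup_ok(c)) {
		ret = -2; /* primary error to report, like -ENOENT above */
		goto cleanup;
	}
	return 0;

cleanup:
	fbret = do_destroy(c); /* best-effort rollback */
	if (fbret)
		fprintf(stderr, "rollback failed: %d\n", fbret);
	return ret; /* not fbret: callers see the original failure */
}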
if (!hremote_node) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get hremote_node\n");
- return ret;
+ return -ENODEV;
}
ret = of_address_to_resource(hremote_node, 0, &res);
+ of_node_put(hremote_node);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
"qmi fail to get reg from hremote\n");
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
+ struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
return;
}
- /* Set the country code to the firmware and wait for
+ /* Set the country code in the firmware; we will then receive
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
- init_country_param.flags = ALPHA_IS_SET;
- memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
- init_country_param.cc_info.alpha2[2] = 0;
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
- ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
- if (ret)
- ath11k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
ath11k_mac_11d_scan_stop(ar);
ar->regdom_set_by_user = true;
u32 ah_txq_imr_nofrm;
u32 ah_txq_isr_txok_all;
- u32 ah_txq_isr_txurn;
- u32 ah_txq_isr_qcborn;
- u32 ah_txq_isr_qcburn;
- u32 ah_txq_isr_qtrig;
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+ ah->ah_txq_isr_txok_all = 0;
/* We treat TXOK,TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
- /* Currently this is not much useful since we treat
- * all queues the same way if we get a TXURN (update
- * tx trigger level) but we might need it later on*/
- if (pisr & AR5K_ISR_TXURN)
- ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
- AR5K_SISR2_QCU_TXURN);
-
/* Misc Beacon related interrupts */
/* For AR5211 */
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRORN);
- }
/* A queue got CBR underrun */
- if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
- AR5K_SISR3_QCBRURN);
- }
/* A queue got triggered */
- if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
+ if (unlikely(pisr & (AR5K_ISR_QTRIG)))
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
- AR5K_SISR4_QTRIG);
- }
data = pisr;
}
}
}
+ if (idx == AR5K_EEPROM_N_PD_CURVES)
+ goto err_out;
+
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
int ath9k_hw_eeprom_init(struct ath_hw *ah)
{
- int status;
-
if (AR_SREV_9300_20_OR_LATER(ah))
ah->eep_ops = &eep_ar9300_ops;
else if (AR_SREV_9287(ah)) {
if (!ah->eep_ops->fill_eeprom(ah))
return -EIO;
- status = ah->eep_ops->check_eeprom(ah);
-
- return status;
+ return ah->eep_ops->check_eeprom(ah);
}
hdr->endpoint_id = epid;
hdr->flags = flags;
hdr->payload_len = cpu_to_be16(len);
+ memset(hdr->control, 0, sizeof(hdr->control));
status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb);
conn_msg->dl_pipeid = endpoint->dl_pipeid;
conn_msg->ul_pipeid = endpoint->ul_pipeid;
+ /* To prevent infoleak */
+ conn_msg->svc_meta_len = 0;
+ conn_msg->pad = 0;
+
ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0);
if (ret)
goto err;
static struct channel_detector *
channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
{
- u32 sz, i;
+ u32 i;
struct channel_detector *cd;
cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
- sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ cd->detectors = kmalloc_array(dpd->num_radar_types,
+ sizeof(*cd->detectors), GFP_ATOMIC);
if (cd->detectors == NULL)
goto fail;
INIT_LIST_HEAD(&wcn->vif_list);
spin_lock_init(&wcn->dxe_lock);
+ spin_lock_init(&wcn->survey_lock);
return 0;
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel = NULL;
+ unsigned long flags;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(wcn->hw->wiphy->bands); i++) {
+ band = wcn->hw->wiphy->bands[i];
+ if (!band)
+ break;
+ for (j = 0; j < band->n_channels; j++) {
+ if (HW_VALUE_CHANNEL(band->channels[j].hw_value) == ch) {
+ channel = &band->channels[j];
+ break;
+ }
+ }
+ if (channel)
+ break;
+ }
+
+ if (!channel) {
+ wcn36xx_err("Cannot tune to channel %d\n", ch);
+ return;
+ }
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+ wcn->band = band;
+ wcn->channel = channel;
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
}
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
}
}
+static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct wcn36xx_chan_survey *chan_survey;
+ int band_idx;
+ unsigned long flags;
+
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ];
+ band_idx = idx;
+ if (band_idx >= sband->n_channels) {
+ band_idx -= sband->n_channels;
+ sband = wcn->hw->wiphy->bands[NL80211_BAND_5GHZ];
+ }
+
+ if (!sband || band_idx >= sband->n_channels)
+ return -ENOENT;
+
+ spin_lock_irqsave(&wcn->survey_lock, flags);
+
+ chan_survey = &wcn->chan_survey[idx];
+ survey->channel = &sband->channels[band_idx];
+ survey->noise = chan_survey->rssi - chan_survey->snr;
+ survey->filled = 0;
+
+ if (chan_survey->rssi > -100 && chan_survey->rssi < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ if (survey->channel == wcn->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+ spin_unlock_irqrestore(&wcn->survey_lock, flags);
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC,
+ "ch %d rssi %d snr %d noise %d filled %x freq %d\n",
+ HW_VALUE_CHANNEL(survey->channel->hw_value),
+ chan_survey->rssi, chan_survey->snr, survey->noise,
+ survey->filled, survey->channel->center_freq);
+
+ return 0;
+}
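For context, mac80211 drives the get_survey callback with idx = 0, 1, 2, ... until the driver returns -ENOENT, which is why the handler above maps one flat index across the 2 GHz and then the 5 GHz channel list, and why noise can be derived per channel as signal (dBm) minus SNR (dB). A sketch of that calling contract; wcn36xx_get_survey is static in the driver, so the wrapper below is purely illustrative:

/* Illustrative consumer of the get_survey contract (hypothetical). */
static int dump_all_surveys(struct ieee80211_hw *hw)
{
	struct survey_info info;
	int idx = 0, err;

	for (;;) {
		err = wcn36xx_get_survey(hw, idx, &info);
		if (err == -ENOENT)
			break;      /* past the last channel: normal end */
		if (err)
			return err; /* a real failure */
		idx++;              /* next flat channel index */
	}
	return 0;
}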
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
+ .get_survey = wcn36xx_get_survey,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
{
struct device_node *mmio_node;
struct device_node *iris_node;
- struct resource *res;
int index;
int ret;
/* Set TX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
- if (!res) {
- wcn36xx_err("failed to get tx_irq\n");
- return -ENOENT;
- }
- wcn->tx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret < 0)
+ return ret;
+ wcn->tx_irq = ret;
/* Set RX IRQ */
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "rx");
- if (!res) {
- wcn36xx_err("failed to get rx_irq\n");
- return -ENOENT;
- }
- wcn->rx_irq = res->start;
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0)
+ return ret;
+ wcn->rx_irq = ret;
/* Acquire SMSM tx enable handle */
wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
void *wcnss;
int ret;
const u8 *addr;
+ int n_channels;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
goto out_wq;
}
+ n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
+ wcn->chan_survey = devm_kcalloc(wcn->dev, n_channels,
+ sizeof(*wcn->chan_survey), GFP_KERNEL);
+ if (!wcn->chan_survey) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+static inline int get_snr(struct wcn36xx_rx_bd *bd)
+{
+ return ((bd->phy_stat1 >> 24) & 0xff);
+}
+
struct wcn36xx_rate {
u16 bitrate;
u16 mcs_or_legacy_index;
dev_kfree_skb_irq(skb);
}
+static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
+ int band, int freq)
+{
+ struct ieee80211_channel *channel;
+ struct ieee80211_supported_band *sband;
+ int idx;
+ int i;
+
+ idx = 0;
+ if (band == NL80211_BAND_5GHZ)
+ idx = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
+
+ sband = wcn->hw->wiphy->bands[band];
+ channel = sband->channels;
+
+ for (i = 0; i < sband->n_channels; i++, channel++) {
+ if (channel->center_freq == freq) {
+ idx += i;
+ break;
+ }
+ }
+
+ spin_lock(&wcn->survey_lock);
+ wcn->chan_survey[idx].rssi = rssi;
+ wcn->chan_survey[idx].snr = snr;
+ spin_unlock(&wcn->survey_lock);
+}
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
+ wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
+ status.band, status.freq);
+
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
enum wcn36xx_ampdu_state ampdu_state[16];
int non_agg_frame_ct;
};
+
struct wcn36xx_dxe_ch;
+
+struct wcn36xx_chan_survey {
+ s8 rssi;
+ u8 snr;
+};
+
struct wcn36xx {
struct ieee80211_hw *hw;
struct device *dev;
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
+
+ struct ieee80211_supported_band *band;
+ struct ieee80211_channel *channel;
+
+ spinlock_t survey_lock; /* protects chan_survey */
+ struct wcn36xx_chan_survey *chan_survey;
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
#include <brcmu_utils.h>
#include <defs.h>
#include <brcmu_wifi.h>
+#include <brcm_hw_ids.h>
#include "core.h"
#include "debug.h"
#include "tracepoint.h"
vif_event_equals(event, action), timeout);
}
+static bool brcmf_use_iso3166_ccode_fallback(struct brcmf_pub *drvr)
+{
+ switch (drvr->bus_if->chip) {
+ case BRCM_CC_4345_CHIP_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
struct brcmf_fil_country_le *ccreq)
{
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ if (brcmf_use_iso3166_ccode_fallback(drvr)) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
+ brcmf_dbg(TRACE, "No country codes configured for device\n");
+ return -EINVAL;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
size = BRCMF_FW_MAX_NVRAM_SIZE;
else
size = data_len;
+ /* Add space for properties we may add */
+ size += strlen(BRCMF_FW_DEFAULT_BOARDREV) + 1;
/* Alloc for extra 0 byte + roundup by 4 + length field */
size += 1 + 3 + sizeof(u32);
nvp->nvram = kzalloc(size, GFP_KERNEL);
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_type)
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_type);
if (alt_path) {
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
}
static u32
-brcmf_create_iovar(char *name, const char *data, u32 datalen,
+brcmf_create_iovar(const char *name, const char *data, u32 datalen,
char *buf, u32 buflen)
{
u32 len;
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
}
s32
-brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
}
s32
-brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
}
s32
-brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
}
static u32
-brcmf_create_bsscfg(s32 bsscfgidx, char *name, char *data, u32 datalen,
+brcmf_create_bsscfg(s32 bsscfgidx, const char *name, char *data, u32 datalen,
char *buf, u32 buflen)
{
const s8 *prefix = "bsscfg:";
}
s32
-brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
}
s32
-brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name,
+brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
}
s32
-brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data)
+brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data)
{
__le32 data_le = cpu_to_le32(data);
}
s32
-brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data)
+brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
return err;
}
-static u32 brcmf_create_xtlv(char *name, u16 id, char *data, u32 len,
+static u32 brcmf_create_xtlv(const char *name, u16 id, char *data, u32 len,
char *buf, u32 buflen)
{
u32 iolen;
return iolen;
}
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
return err;
}
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
return err;
}
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data)
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data)
{
__le32 data_le = cpu_to_le32(data);
sizeof(data_le));
}
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data)
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data)
{
__le32 data_le = cpu_to_le32(*data);
s32 err;
return err;
}
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data)
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data)
{
return brcmf_fil_xtlv_data_get(ifp, name, id, data, sizeof(*data));
}
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data)
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data)
{
__le16 data_le = cpu_to_le16(*data);
s32 err;
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, const char *name, const void *data,
u32 len);
-s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, char *name, u32 *data);
+s32 brcmf_fil_iovar_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_iovar_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
-s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_set(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_bsscfg_data_get(struct brcmf_if *ifp, const char *name, void *data,
u32 len);
-s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, char *name, u32 data);
-s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, char *name, u32 *data);
-s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_bsscfg_int_set(struct brcmf_if *ifp, const char *name, u32 data);
+s32 brcmf_fil_bsscfg_int_get(struct brcmf_if *ifp, const char *name, u32 *data);
+s32 brcmf_fil_xtlv_data_set(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, char *name, u16 id,
+s32 brcmf_fil_xtlv_data_get(struct brcmf_if *ifp, const char *name, u16 id,
void *data, u32 len);
-s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, char *name, u16 id, u32 data);
-s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, char *name, u16 id, u32 *data);
-s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, char *name, u16 id, u8 *data);
-s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, char *name, u16 id, u16 *data);
+s32 brcmf_fil_xtlv_int_set(struct brcmf_if *ifp, const char *name, u16 id, u32 data);
+s32 brcmf_fil_xtlv_int_get(struct brcmf_if *ifp, const char *name, u16 id, u32 *data);
+s32 brcmf_fil_xtlv_int8_get(struct brcmf_if *ifp, const char *name, u16 id, u8 *data);
+s32 brcmf_fil_xtlv_int16_get(struct brcmf_if *ifp, const char *name, u16 id, u16 *data);
#endif /* _fwil_h_ */
/* Set board-type to the first string of the machine compatible prop */
root = of_find_node_by_path("/");
if (root) {
- int i, len;
+ int i;
char *board_type;
const char *tmp;
of_property_read_string_index(root, "compatible", 0, &tmp);
/* get rid of '/' in the compatible string to be able to find the FW */
- len = strlen(tmp) + 1;
- board_type = devm_kzalloc(dev, len, GFP_KERNEL);
- strscpy(board_type, tmp, len);
+ board_type = devm_kstrdup(dev, tmp, GFP_KERNEL);
+ if (!board_type) {
+ of_node_put(root);
+ return;
+ }
for (i = 0; board_type[i]; i++) {
if (board_type[i] == '/')
board_type[i] = '-';
u8 oui_type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
u8 type;
u8 subtype;
u8 dialog_token;
- u8 elts[1];
+ u8 elts[];
};
/**
u8 category;
u8 action;
u8 dialog_token;
- u8 query_data[1];
+ u8 query_data[];
};
/**
return false;
pact_frm = (struct brcmf_p2p_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_pub_act_frame) - 1)
+ if (frame_len < sizeof(*pact_frm))
return false;
if (pact_frm->category == P2P_PUB_AF_CATEGORY &&
return false;
act_frm = (struct brcmf_p2p_action_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2p_action_frame) - 1)
+ if (frame_len < sizeof(*act_frm))
return false;
if (act_frm->category == P2P_AF_CATEGORY &&
return false;
sd_act_frm = (struct brcmf_p2psd_gas_pub_act_frame *)frame;
- if (frame_len < sizeof(struct brcmf_p2psd_gas_pub_act_frame) - 1)
+ if (frame_len < sizeof(*sd_act_frm))
return false;
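All three length checks drop their "- 1" because converting a one-element trailing array (u8 elts[1]) to a C99 flexible array member (u8 elts[]) removes the placeholder byte from sizeof. A standalone illustration with hypothetical structs:

#include <stdint.h>
#include <stdio.h>

struct old_frame { uint8_t category, action, token; uint8_t elts[1]; };
struct new_frame { uint8_t category, action, token; uint8_t elts[];  };

int main(void)
{
	/* The [1] placeholder counts toward sizeof; the flexible array
	 * member does not, so the "- 1" in the length checks becomes
	 * unnecessary once the structs are converted.
	 */
	printf("old=%zu new=%zu\n", sizeof(struct old_frame),
	       sizeof(struct new_frame)); /* typically 4 and 3 */
	return 0;
}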
if (sd_act_frm->category != P2PSD_ACTION_CATEGORY)
#include <linux/interrupt.h>
#include <linux/bcma/bcma.h>
#include <linux/sched.h>
+#include <linux/io.h>
#include <asm/unaligned.h>
#include <soc.h>
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+/* firmware config files */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
+
+/* per-board firmware binaries */
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.bin");
+
static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43602_CHIP_ID, 0xFFFFFFFF, 43602),
BRCMF_FW_ENTRY(BRCM_CC_43465_CHIP_ID, 0xFFFFFFF0, 4366C),
}
-static void
-brcmf_pcie_copy_mem_todev(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
- void *srcaddr, u32 len)
-{
- void __iomem *address = devinfo->tcm + mem_offset;
- __le32 *src32;
- __le16 *src16;
- u8 *src8;
-
- if (((ulong)address & 4) || ((ulong)srcaddr & 4) || (len & 4)) {
- if (((ulong)address & 2) || ((ulong)srcaddr & 2) || (len & 2)) {
- src8 = (u8 *)srcaddr;
- while (len) {
- iowrite8(*src8, address);
- address++;
- src8++;
- len--;
- }
- } else {
- len = len / 2;
- src16 = (__le16 *)srcaddr;
- while (len) {
- iowrite16(le16_to_cpu(*src16), address);
- address += 2;
- src16++;
- len--;
- }
- }
- } else {
- len = len / 4;
- src32 = (__le32 *)srcaddr;
- while (len) {
- iowrite32(le32_to_cpu(*src32), address);
- address += 4;
- src32++;
- len--;
- }
- }
-}
-
-
static void
brcmf_pcie_copy_dev_tomem(struct brcmf_pciedev_info *devinfo, u32 mem_offset,
void *dstaddr, u32 len)
return;
console = &devinfo->shared.console;
+ if (!console->base_addr)
+ return;
addr = console->base_addr + BRCMF_CONSOLE_WRITEIDX_OFFSET;
newidx = brcmf_pcie_read_tcm32(devinfo, addr);
while (newidx != console->read_idx) {
{
}
+static int brcmf_pcie_preinit(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+
+ brcmf_dbg(PCIE, "Enter\n");
+
+ brcmf_pcie_intr_enable(buspub->devinfo);
+ brcmf_pcie_hostready(buspub->devinfo);
+
+ return 0;
+}
static int brcmf_pcie_tx(struct device *dev, struct sk_buff *skb)
{
}
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
+ .preinit = brcmf_pcie_preinit,
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
.txctl = brcmf_pcie_tx_ctlpkt,
shared->max_rxbufpost, shared->rx_dataoffset);
brcmf_pcie_bus_console_init(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
return 0;
}
return err;
brcmf_dbg(PCIE, "Download FW %s\n", devinfo->fw_name);
- brcmf_pcie_copy_mem_todev(devinfo, devinfo->ci->rambase,
- (void *)fw->data, fw->size);
+ memcpy_toio(devinfo->tcm + devinfo->ci->rambase,
+ (void *)fw->data, fw->size);
resetintr = get_unaligned_le32(fw->data);
release_firmware(fw);
brcmf_dbg(PCIE, "Download NVRAM %s\n", devinfo->nvram_name);
address = devinfo->ci->rambase + devinfo->ci->ramsize -
nvram_len;
- brcmf_pcie_copy_mem_todev(devinfo, address, nvram, nvram_len);
+ memcpy_toio(devinfo->tcm + address, nvram, nvram_len);
brcmf_fw_nvram_free(nvram);
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
ret = brcmf_chip_get_raminfo(devinfo->ci);
if (ret) {
brcmf_err(bus, "Failed to get RAM info\n");
+ release_firmware(fw);
+ brcmf_fw_nvram_free(nvram);
goto fail;
}
init_waitqueue_head(&devinfo->mbdata_resp_wait);
- brcmf_pcie_intr_enable(devinfo);
- brcmf_pcie_hostready(devinfo);
-
ret = brcmf_attach(&devinfo->pdev->dev);
if (ret)
goto fail;
return;
devinfo = bus->bus_priv.pcie->devinfo;
+ brcmf_pcie_bus_console_read(devinfo, false);
devinfo->state = BRCMFMAC_PCIE_STATE_DOWN;
if (devinfo->ci)
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_RAW_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4358_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4359_DEVICE_ID),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_DEVICE_ID),
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
/* per-board firmware binaries */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.bin");
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
+#define BRCM_PCIE_43570_RAW_DEVICE_ID 0xaa31
#define BRCM_PCIE_4358_DEVICE_ID 0x43e9
#define BRCM_PCIE_4359_DEVICE_ID 0x43ef
#define BRCM_PCIE_43602_DEVICE_ID 0x43ba
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
*/
DEBUG_LOG_MSG = 0xf7,
- /**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
/**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
shdr->type != htons(ETH_P_PAE) &&
shdr->type != htons(ETH_P_TDLS))))
skb->ip_summed = CHECKSUM_NONE;
+ else
+ /* mac80211 assumes full CSUM including SNAP header */
+ skb_postpush_rcsum(skb, shdr, sizeof(*shdr));
}
fraglen = len - headlen;
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
{
int i;
- if (!q)
+ if (!q || !q->ndesc)
return;
/* clear descriptors */
struct mt76_queue_entry entry;
int last;
- if (!q)
+ if (!q || !q->ndesc)
return;
spin_lock_bh(&q->cleanup_lock);
int len = SKB_WITH_OVERHEAD(q->buf_size);
int offset = q->buf_offset;
+ if (!q->ndesc)
+ return 0;
+
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
qbuf.addr = addr + offset;
qbuf.len = len - offset;
+ qbuf.skip_unmap = false;
mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
frames++;
}
void *buf;
bool more;
+ if (!q->ndesc)
+ return;
+
spin_lock_bh(&q->lock);
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
struct mt76_queue *q = &dev->q_rx[qid];
int i;
+ if (!q->ndesc)
+ return;
+
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
{ .start_freq = 5725, .end_freq = 5950, },
};
-const struct cfg80211_sar_capa mt76_sar_capa = {
+static const struct cfg80211_sar_capa mt76_sar_capa = {
.type = NL80211_SAR_TYPE_POWER,
.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
.freq_ranges = &mt76_sar_freq_ranges[0],
wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
mt76_update_survey(phy);
+ if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
+ phy->chandef.width != chandef->width)
+ phy->dfs_state = MT_DFS_STATE_UNKNOWN;
+
phy->chandef = *chandef;
phy->chan_state = mt76_channel_state(phy, chandef->chan);
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
+{
+ struct ieee80211_hw *hw = phy->hw;
+ struct mt76_dev *dev = phy->dev;
+
+ if (dev->region == NL80211_DFS_UNSET ||
+ test_bit(MT76_SCANNING, &phy->state))
+ return MT_DFS_STATE_DISABLED;
+
+ if (!hw->conf.radar_enabled) {
+ if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
+ (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return MT_DFS_STATE_ACTIVE;
+
+ return MT_DFS_STATE_DISABLED;
+ }
+
+ if (phy->chandef.chan->dfs_state != NL80211_DFS_AVAILABLE)
+ return MT_DFS_STATE_CAC;
+
+ return MT_DFS_STATE_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);
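A caller is expected to branch on the returned state, e.g. starting a Channel Availability Check before a radar channel becomes usable. The dispatch below is an illustrative sketch, not mt76 driver code:

/* Illustrative dispatch on the reported DFS state (hypothetical). */
static void handle_dfs_state(struct mt76_phy *phy)
{
	switch (mt76_phy_dfs_state(phy)) {
	case MT_DFS_STATE_DISABLED:
		/* no radar detection required on this channel */
		break;
	case MT_DFS_STATE_CAC:
		/* channel not yet available: run the Channel Availability Check */
		break;
	case MT_DFS_STATE_ACTIVE:
		/* channel in use: keep in-service radar monitoring running */
		break;
	case MT_DFS_STATE_UNKNOWN:
	default:
		/* reset by a channel or width change: re-evaluate */
		break;
	}
}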
MT_RXQ_MCU_WA,
MT_RXQ_EXT,
MT_RXQ_EXT_WA,
+ MT_RXQ_MAIN_WA,
__MT_RXQ_MAX
};
MT_CIPHER_GCMP_256,
};
+enum mt76_dfs_state {
+ MT_DFS_STATE_UNKNOWN,
+ MT_DFS_STATE_DISABLED,
+ MT_DFS_STATE_CAC,
+ MT_DFS_STATE_ACTIVE,
+};
+
struct mt76_queue_buf {
dma_addr_t addr;
u16 len;
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 288
+#define MT76_N_WCIDS 544
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY BIT(3)
} mcu;
};
-#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
+#define MT76S_XMIT_BUF_SZ 0x3fe00
#define MT76S_NUM_TX_ENTRIES 256
#define MT76S_NUM_RX_ENTRIES 512
struct mt76_sdio {
struct work_struct stat_work;
- u8 *xmit_buf[IEEE80211_NUM_ACS + 2];
+ u8 *xmit_buf;
+ u32 xmit_buf_sz;
struct sdio_func *func;
void *intr_data;
u8 band_idx;
u8 wmm_idx;
u8 scan_seq_num;
+ u8 cipher;
};
struct mt76_phy {
struct ieee80211_channel *main_chan;
struct mt76_channel_state *chan_state;
+ enum mt76_dfs_state dfs_state;
ktime_t survey_time;
struct mt76_hw_cap cap;
#define mt76_queue_reset(dev, ...) (dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
#define mt76_for_each_q_rx(dev, i) \
- for (i = 0; i < ARRAY_SIZE((dev)->q_rx) && \
- (dev)->q_rx[i].ndesc; i++)
+ for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++) \
+ if ((dev)->q_rx[i].ndesc)
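The rewrite changes the loop's semantics, not just its shape: the old form stopped at the first queue with ndesc == 0, hiding every queue behind it, while the new for/if form visits all slots and merely skips unused ones. A minimal standalone illustration with a hypothetical array:

#include <stdio.h>

struct q { int ndesc; };

int main(void)
{
	struct q q_rx[4] = { {8}, {0}, {8}, {8} };
	int i;

	/* old shape: terminates at q_rx[1], never reaches q_rx[2..3] */
	for (i = 0; i < 4 && q_rx[i].ndesc; i++)
		printf("old visits %d\n", i);

	/* new shape: skips empty slots but still reaches q_rx[2..3] */
	for (i = 0; i < 4; i++)
		if (q_rx[i].ndesc)
			printf("new visits %d\n", i);
	return 0;
}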
struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
+enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
if (!mt7615_firmware_offload(dev) || mt76_is_usb(&dev->mt76))
return -EOPNOTSUPP;
- if (val == pm->enable)
- return 0;
+ mutex_lock(&dev->mt76.mutex);
- mt7615_mutex_acquire(dev);
+ if (val == pm->enable)
+ goto out;
if (dev->phy.n_beacon_vif) {
ret = -EBUSY;
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
+ /* make sure the chip is awake here and ps_work is scheduled
+ * just at the end of this routine.
+ */
+ pm->enable = false;
+ mt76_connac_pm_wake(&dev->mphy, pm);
+
pm->enable = val;
+ mt76_connac_power_save_sched(&dev->mphy, pm);
out:
- mt7615_mutex_release(dev);
+ mutex_unlock(&dev->mt76.mutex);
return ret;
}
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
mt7615_cap_dbdc_disable(dev);
- dev->phy.dfs_state = -1;
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7615_testmode_ops;
static int mt7615_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7615_sta *msta = (struct mt7615_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD1_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[1])) !=
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(&eth_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD4_FRAME_CONTROL, rxd[4]);
hdr.seq_ctrl = FIELD_GET(MT_RXD6_SEQ_CTRL, rxd[6]);
qos_ctrl = FIELD_GET(MT_RXD6_QOS_CTL, rxd[6]);
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
mt7615_txwi_free(dev, txwi);
}
-static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+static void mt7615_mac_tx_free(struct mt7615_dev *dev, void *data, int len)
{
- struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)data;
+ void *end = data + len;
u8 i, count;
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le16_to_cpu(token[i]));
} else {
__le32 *token = (__le32 *)&free->token[0];
+ if (WARN_ON_ONCE((void *)&token[count] > end))
+ return;
+
for (i = 0; i < count; i++)
mt7615_mac_tx_free_token(dev, le32_to_cpu(token[i]));
}
- dev_kfree_skb(skb);
-
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+EXPORT_SYMBOL_GPL(mt7615_rx_check);
+
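rx_check lets the DMA layer dispose of pure status frames before any skb is built. A hedged sketch of the call site this hook implies in the generic rx path (only the hook name is from this patch; the surrounding code is assumed):

	if (dev->drv->rx_check && !dev->drv->rx_check(dev, data, len))
		return;		/* TXS/tx-free consumed in place, no skb needed */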
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
dev_kfree_skb(skb);
break;
case PKT_TYPE_TXRX_NOTIFY:
- mt7615_mac_tx_free(dev, skb);
+ mt7615_mac_tx_free(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
break;
case PKT_TYPE_RX_EVENT:
mt7615_mcu_rx_event(dev, skb);
int i;
if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_worker_schedule(&mdev->sdio.txrx_worker);
} else {
mt76_for_each_q_rx(mdev, i)
test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state))
goto out;
+ if (mutex_is_locked(&dev->mt76.mutex))
+ /* if the mt76 mutex is held, do not put the device to
+ * sleep: we are in the middle of accessing the device
+ * register map. Wait for the next power_save trigger
+ * instead.
+ */
+ goto out;
+
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
delta = dev->pm.last_activity + delta - jiffies;
goto out;
struct mt7615_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7615_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7615_dfs_start_rdd(struct mt7615_dev *dev, int chain)
{
int err;
- err = mt7615_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7615_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
- MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7615_dfs_start_radar_detector(struct mt7615_phy *phy)
int err;
/* start CAC */
- err = mt7615_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7615_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
if (is_mt7663(&dev->mt76))
return 0;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
-
- return 0;
- }
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
+ if (prev_state == dfs_state)
return 0;
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7615_dfs_stop_radar_detector(phy);
- err = mt7615_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7615_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7615_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7615_dfs_start_radar_detector(phy);
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
- return mt7615_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
}
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7615_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7615_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
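Putting the branches above together, the per-phy DFS state machine this hunk implements is roughly (a reconstruction, not a diagram from the source):

	UNKNOWN  --(first pass)--> detector stopped, then as below
	DISABLED --(radar channel: init specs + start RDD)--> CAC
	CAC      --(channel becomes available: RDD_CAC_END)--> ACTIVE
	any      --(non-radar channel or region unset: NORMAL_START)--> DISABLED

with a failed RDD_CAC_END falling back to UNKNOWN so the next pass retries.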
if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
+ if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ !(mphy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
return;
if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
struct ieee80211_key_conf *key)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
&mvif->sta;
mt7615_mutex_acquire(dev);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7615_mcu_add_bss_info(phy, vif, NULL, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
int i;
+ if (!sta_rates)
+ return;
+
spin_lock_bh(&dev->mt76.lock);
for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
msta->rates[i].idx = sta_rates->rate[i].idx;
#define IMG_CRC_LEN 4
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_VALID_RAM_ENTRY BIT(5)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
int cmd, int *wait_seq)
{
static int
mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 rsv[3];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = state ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL),
- &req, sizeof(req), true);
-}
-
-static int
-mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct mt7615_phy *phy,
- bool enable)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
- struct bss_info_basic *bss;
- u8 wlan_idx = mvif->sta.wcid.idx;
- struct tlv *tlv;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable && sta) {
- struct mt7615_sta *msta;
-
- msta = (struct mt7615_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = wlan_idx;
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- u8 omac_idx = mvif->mt76.omac_idx;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- if (vif->p2p)
- type = CONNECTION_P2P_GO;
- else
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- if (vif->p2p)
- type = CONNECTION_P2P_GC;
- else
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
-}
-
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
-static void
-mt7615_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7615_vif *mvif)
-{
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+ return mt76_connac_mcu_set_pm(&dev->mt76, band, state);
}
static int
return PTR_ERR(skb);
if (enable)
- mt7615_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, sta, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7615_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
NULL, wtbl_hdr);
if (sta)
mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, wskb, sta,
- NULL, wtbl_hdr);
+ NULL, wtbl_hdr, true);
mt76_connac_mcu_wtbl_hdr_trans_tlv(wskb, vif, &msta->wcid,
NULL, wtbl_hdr);
}
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb = NULL;
-
- wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
- WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, &msta->wcid, NULL,
- wtbl_hdr);
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(WTBL_UPDATE), true);
+ return mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const struct mt7615_mcu_ops wtbl_update_ops = {
struct mt7615_sta *sta = (struct mt7615_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &sta->vif->mt76, params,
- enable, true);
+ MCU_UNI_CMD(STA_REC_UPDATE), enable,
+ true);
}
static int
return ret;
}
-static u32 mt7615_mcu_gen_dl_mode(u8 feature_set, bool is_cr4)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
const struct mt7615_fw_trailer *hdr,
u32 len, addr, mode;
for (i = 0; i < n_region; i++) {
- mode = mt7615_mcu_gen_dl_mode(hdr[i].feature_set, is_cr4);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ hdr[i].feature_set, is_cr4);
len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
addr = le32_to_cpu(hdr[i].addr);
dev_info(dev->mt76.dev, "Parsing tailer Region: %d\n", i);
buf = (const struct mt7663_fw_buf *)(base_addr - shift);
- mode = mt7615_mcu_gen_dl_mode(buf->feature_set, false);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ buf->feature_set, false);
addr = le32_to_cpu(buf->img_dest_addr);
len = le32_to_cpu(buf->img_size);
&req, sizeof(req), true);
}
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL),
- &req, sizeof(req), true);
-}
-
int mt7615_mcu_set_fcc5_lpn(struct mt7615_dev *dev, int val)
{
struct {
.token_size = MT7615_TOKEN_SIZE,
.tx_prepare_skb = mt7615_tx_prepare_skb,
.tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_check = mt7615_rx_check,
.rx_skb = mt7615_queue_rx_skb,
.rx_poll_complete = mt7615_rx_poll_complete,
.sta_ps = mt7615_sta_ps,
int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
const struct ieee80211_tx_queue_params *params);
void mt7615_mcu_rx_event(struct mt7615_dev *dev, struct sk_buff *skb);
-int mt7615_mcu_rdd_cmd(struct mt7615_dev *dev,
- enum mt7615_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val);
int mt7615_mcu_rdd_send_pattern(struct mt7615_dev *dev);
int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl);
-static inline bool is_mt7622(struct mt76_dev *dev)
-{
- if (!IS_ENABLED(CONFIG_MT7622_WMAC))
- return false;
-
- return mt76_chip(dev) == 0x7622;
-}
-
-static inline bool is_mt7615(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
-}
-
-static inline bool is_mt7611(struct mt76_dev *dev)
-{
- return mt76_chip(dev) == 0x7611;
-}
-
static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
{
mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
void mt7615_tx_worker(struct mt76_worker *w);
void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_tx_token_put(struct mt7615_dev *dev);
+bool mt7615_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
struct mt7663s_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err)
return err;
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int i, ret;
+ int ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
static const struct usb_device_id mt7615_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x043e, 0x310c, 0xff, 0xff, 0xff) },
{ },
};
};
struct mt76_connac_pm {
- bool enable;
- bool ds_enable;
- bool suspended;
+ bool enable:1;
+ bool enable_user:1;
+ bool ds_enable:1;
+ bool ds_enable_user:1;
+ bool suspended:1;
spinlock_t txq_lock;
struct {
unsigned long last_activity;
};
+struct mt76_connac_sta_key_conf {
+ s8 keyidx;
+ u8 key[16];
+};
+
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
static inline bool is_mt7922(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7663;
}
+static inline bool is_mt7915(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7915;
+}
+
+static inline bool is_mt7916(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7906;
+}
+
+static inline bool is_mt7622(struct mt76_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT7622_WMAC))
+ return false;
+
+ return mt76_chip(dev) == 0x7622;
+}
+
+static inline bool is_mt7615(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7615 || mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_mt7611(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7611;
+}
+
+static inline bool is_connac_v1(struct mt76_dev *dev)
+{
+ return is_mt7615(dev) || is_mt7663(dev) || is_mt7622(dev);
+}
+
+static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
+{
+ static const u8 width_to_bw[] = {
+ [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
+ [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
+ [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
+ [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
+ [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
+ [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
+ [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ };
+
+ if (chandef->width >= ARRAY_SIZE(width_to_bw))
+ return 0;
+
+ return width_to_bw[chandef->width];
+}
+
+static inline u8 mt76_connac_lmac_mapping(u8 ac)
+{
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
+}
+
int mt76_connac_pm_wake(struct mt76_phy *phy, struct mt76_connac_pm *pm);
void mt76_connac_power_save_sched(struct mt76_phy *phy,
struct mt76_connac_pm *pm);
};
int cmd;
- if (is_mt7921(dev) &&
- (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS) || addr == 0x900000))
+ if ((!is_connac_v1(dev) && addr == MCU_PATCH_ADDRESS) ||
+ (is_mt7921(dev) && addr == 0x900000))
cmd = MCU_CMD(PATCH_START_REQ);
else
cmd = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_nested_tlv);
struct sk_buff *
-mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid)
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len)
{
struct sta_req_hdr hdr = {
.bss_idx = mvif->idx,
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
&hdr.wlan_idx_hi);
- skb = mt76_mcu_msg_alloc(dev, NULL, MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_mcu_msg_alloc(dev, NULL, len);
if (!skb)
return ERR_PTR(-ENOMEM);
return skb;
}
-EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_sta_req);
+EXPORT_SYMBOL_GPL(__mt76_connac_mcu_alloc_sta_req);
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
}
if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_alloc_wtbl_req);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u8 omac_idx = mvif->omac_idx;
+ struct bss_info_omac *omac;
+ struct tlv *tlv;
+ u32 type = 0;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GO;
+ else
+ type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (vif->p2p)
+ type = CONNECTION_P2P_GC;
+ else
+ type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = CONNECTION_IBSS_ADHOC;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
+
+ omac = (struct bss_info_omac *)tlv;
+ omac->conn_type = cpu_to_le32(type);
+ omac->omac_idx = mvif->omac_idx;
+ omac->band_idx = mvif->band_idx;
+ omac->hw_bss_idx = omac_idx > EXT_BSSID_START ? HW_BSSID_0 : omac_idx;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
+
void mt76_connac_mcu_sta_basic_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
-static void
-mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct sta_rec_uapsd *uapsd;
struct tlv *tlv;
}
uapsd->max_sp = sta->max_sp;
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_uapsd);
void mt76_connac_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
sizeof(*htr),
wtbl_tlv, sta_wtbl);
htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
+ htr->no_rx_trans = true;
if (vif->type == NL80211_IFTYPE_STATION)
htr->to_ds = true;
else
htr->from_ds = true;
+ if (!wcid)
+ return;
+
+ htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
htr->to_ds = true;
htr->from_ds = true;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_update_hdr_trans);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct sk_buff *skb = NULL;
+
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(dev, wcid, WTBL_SET, NULL,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, NULL, wtbl_hdr);
+
+ return mt76_mcu_skb_send_msg(dev, skb, MCU_EXT_CMD(WTBL_UPDATE), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_update_hdr_trans);
+
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev,
struct sk_buff *skb,
struct ieee80211_vif *vif,
generic->muar_idx = mvif->omac_idx;
generic->qos = sta->wme;
} else {
- if (is_mt7921(dev) &&
- vif->type == NL80211_IFTYPE_STATION)
+ if (!is_connac_v1(dev) && vif->type == NL80211_IFTYPE_STATION)
memcpy(generic->peer_addr, vif->bss_conf.bssid,
ETH_ALEN);
else
rx->rca2 = 1;
rx->rv = 1;
- if (is_mt7921(dev))
+ if (!is_connac_v1(dev))
return;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SPE, sizeof(*spe),
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_tlv);
-static void
-mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv)
{
struct wtbl_smps *smps;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
-
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- smps->smps = true;
+ smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_smps_tlv);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
+ void *wtbl_tlv, bool ldpc)
{
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+ ht->ldpc = ldpc &&
+ !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->af = sta->ht_cap.ampdu_factor;
ht->mm = sta->ht_cap.ampdu_density;
ht->ht = true;
sizeof(*vht), wtbl_tlv,
sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->ldpc = ldpc &&
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
- if (!is_mt7921(dev) && sta->ht_cap.ht_supported) {
+ if (is_connac_v1(dev) && sta->ht_cap.ht_supported) {
/* sgi */
u32 msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
sta_wtbl, wtbl_hdr);
if (info->sta)
mt76_connac_mcu_wtbl_ht_tlv(dev, skb, info->sta,
- sta_wtbl, wtbl_hdr);
+ sta_wtbl, wtbl_hdr, true);
}
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
ba->rst_ba_sb = 1;
}
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
ba->ba_winsize = enable ? cpu_to_le16(params->buf_size) : 0;
return;
}
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+ int cmd, bool enable, bool tx)
{
struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv;
struct wtbl_req_hdr *wtbl_hdr;
mt76_connac_mcu_wtbl_ba_tlv(dev, skb, params, enable, tx, sta_wtbl,
wtbl_hdr);
- ret = mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ ret = mt76_mcu_skb_send_msg(dev, skb, cmd, true);
if (ret)
return ret;
mt76_connac_mcu_sta_ba_tlv(skb, params, enable, tx);
- return mt76_mcu_skb_send_msg(dev, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
+ return mt76_mcu_skb_send_msg(dev, skb, cmd, true);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
-static u8
-mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band,
- struct ieee80211_sta *sta)
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
struct ieee80211_sta_ht_cap *ht_cap;
u8 mode = 0;
- if (!is_mt7921(dev))
+ if (is_connac_v1(dev))
return 0x38;
if (sta) {
return mode;
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
-static const struct ieee80211_sta_he_cap *
+const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
enum nl80211_band band = phy->chandef.chan->band;
return ieee80211_get_he_iftype_cap(sband, vif->type);
}
+EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
+static int
+mt76_connac_mcu_sta_key_tlv(struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct sk_buff *skb,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct sta_rec_sec *sec;
+ u32 len = sizeof(*sec);
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
+ sec = (struct sta_rec_sec *)tlv;
+ sec->add = cmd;
+
+ if (cmd == SET_KEY) {
+ struct sec_key *sec_key;
+ u8 cipher;
+
+ cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ if (cipher == MCU_CIPHER_NONE)
+ return -EOPNOTSUPP;
+
+ sec_key = &sec->key[0];
+ sec_key->cipher_len = sizeof(*sec_key);
+
+ if (cipher == MCU_CIPHER_BIP_CMAC_128) {
+ sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
+ sec_key->key_id = sta_key_conf->keyidx;
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, sta_key_conf->key, 16);
+
+ sec_key = &sec->key[1];
+ sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
+ sec_key->cipher_len = sizeof(*sec_key);
+ sec_key->key_len = 16;
+ memcpy(sec_key->key, key->key, 16);
+ sec->n_cipher = 2;
+ } else {
+ sec_key->cipher_id = cipher;
+ sec_key->key_id = key->keyidx;
+ sec_key->key_len = key->keylen;
+ memcpy(sec_key->key, key->key, key->keylen);
+
+ if (cipher == MCU_CIPHER_TKIP) {
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(sec_key->key + 16, key->key + 24, 8);
+ memcpy(sec_key->key + 24, key->key + 16, 8);
+ }
+
+ /* store key_conf for BIP batch update */
+ if (cipher == MCU_CIPHER_AES_CCMP) {
+ memcpy(sta_key_conf->key, key->key, key->keylen);
+ sta_key_conf->keyidx = key->keyidx;
+ }
+
+ len -= sizeof(*sec_key);
+ sec->n_cipher = 1;
+ }
+ } else {
+ len -= sizeof(sec->key);
+ sec->n_cipher = 0;
+ }
+ sec->len = cpu_to_le16(len);
+
+ return 0;
+}
+
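The MIC-key swap above follows the standard nl80211/mac80211 TKIP key layout; for reference (the offsets are per nl80211, the MCU's reversed expectation is inferred from the swap itself):

	/* key[ 0..15]  temporal key (TK)
	 * key[16..23]  Tx MIC key  -> stored at MCU offset 24
	 * key[24..31]  Rx MIC key  -> stored at MCU offset 16
	 */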
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = mt76_connac_mcu_sta_key_tlv(sta_key_conf, skb, key, cmd);
+ if (ret) {
+ dev_kfree_skb(skb); /* don't leak the sta req skb on failure */
+ return ret;
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_add_key);
+
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif)
+{
+ struct bss_info_ext_bss *ext;
+ int ext_bss_idx, tsf_offset;
+ struct tlv *tlv;
+
+ ext_bss_idx = mvif->omac_idx - EXT_BSSID_START;
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
+
+ ext = (struct bss_info_ext_bss *)tlv;
+ tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+ ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_ext_tlv);
+
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u8 wlan_idx,
+ bool enable)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
+ struct bss_info_basic *bss;
+ struct tlv *tlv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MONITOR:
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (enable) {
+ rcu_read_lock();
+ if (!sta)
+ sta = ieee80211_find_sta(vif,
+ vif->bss_conf.bssid);
+ /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
+ if (sta) {
+ struct mt76_wcid *wcid;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wlan_idx = wcid->idx;
+ }
+ rcu_read_unlock();
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ type = NETWORK_IBSS;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
+
+ bss = (struct bss_info_basic *)tlv;
+ bss->network_type = cpu_to_le32(type);
+ bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
+ bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
+ bss->wmm_idx = mvif->wmm_idx;
+ bss->active = enable;
+ bss->cipher = mvif->cipher;
+
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ struct cfg80211_chan_def *chandef = &phy->chandef;
+
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ bss->phy_mode = mt76_connac_get_phy_mode(phy, vif,
+ chandef->chan->band, NULL);
+ } else {
+ memcpy(bss->bssid, phy->macaddr, ETH_ALEN);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_basic_tlv);
+
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter)
+{
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx_lo;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 wlan_idx_hi;
+ u8 rsv[2];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = enter ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = band,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(PM_STATE_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_pm);
+
+int mt76_connac_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 power_mode;
+ u8 rsv[3];
+ } req = {
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_restart);
+
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val)
+{
+ struct {
+ u8 ctrl;
+ u8 rdd_idx;
+ u8 rdd_rx_sel;
+ u8 val;
+ u8 rsv[4];
+ } __packed req = {
+ .ctrl = cmd,
+ .rdd_idx = index,
+ .rdd_rx_sel = rx_sel,
+ .val = val,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_EXT_CMD(SET_RDD_CTRL), &req,
+ sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_rdd_cmd);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
#include "mt76_connac.h"
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+#define FW_FEATURE_ENCRY_MODE BIT(4)
+#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_VALID_RAM_ENTRY BIT(5)
+#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
+#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
+#define PATCH_SEC_TYPE_INFO 0x2
+
struct tlv {
__le16 tag;
__le16 len;
sizeof(struct sta_rec_muru) + \
sizeof(struct sta_rec_bfee) + \
sizeof(struct sta_rec_ra) + \
+ sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
MCU_EXT_CMD_SCS_CTRL = 0x82,
MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
+ MCU_EXT_CMD_OFFCH_SCAN_CTRL = 0x9a,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
MCU_CE_CMD_CANCEL_HW_SCAN = 0x1b,
- MCU_CE_CMD_SET_ROC = 0x1d,
+ MCU_CE_CMD_SET_ROC = 0x1c,
+ MCU_CE_CMD_SET_EDCA_PARMS = 0x1d,
MCU_CE_CMD_SET_P2P_OPPPS = 0x33,
MCU_CE_CMD_SET_RATE_TX_POWER = 0x5d,
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
u8 data[320];
} __packed;
+static inline enum mcu_cipher_type
+mt76_connac_mcu_get_cipher(int cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MCU_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MCU_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return MCU_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ return MCU_CIPHER_BIP_CMAC_128;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MCU_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MCU_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MCU_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MCU_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MCU_CIPHER_WAPI;
+ default:
+ return MCU_CIPHER_NONE;
+ }
+}
+
+static inline u32
+mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
+{
+ u32 ret = 0;
+
+ ret |= feature_set & FW_FEATURE_SET_ENCRYPT ?
+ DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV : 0;
+ if (is_mt7921(dev))
+ ret |= feature_set & FW_FEATURE_ENCRY_MODE ?
+ DL_CONFIG_ENCRY_MODE_SEL : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
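A worked example of the resulting download-mode word: on mt7921 with feature_set = FW_FEATURE_SET_ENCRYPT | FIELD_PREP(FW_FEATURE_SET_KEY_IDX, 1) and is_wa = false, the helper returns DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV | FIELD_PREP(DL_MODE_KEY_IDX, 1) | DL_MODE_NEED_RSP = 0x1 | 0x8 | 0x2 | 0x80000000 = 0x8000000b (FW_FEATURE_ENCRY_MODE is not set, so DL_CONFIG_ENCRY_MODE_SEL stays clear).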
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
{
*wlan_idx_hi = 0;
- if (is_mt7921(dev)) {
+ if (!is_connac_v1(dev)) {
*wlan_idx_lo = wcid ? to_wcid_lo(wcid->idx) : 0;
*wlan_idx_hi = wcid ? to_wcid_hi(wcid->idx) : 0;
} else {
}
struct sk_buff *
+__mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
+ struct mt76_wcid *wcid, int len);
+static inline struct sk_buff *
mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
- struct mt76_wcid *wcid);
+ struct mt76_wcid *wcid)
+{
+ return __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+}
+
struct wtbl_req_hdr *
mt76_connac_mcu_alloc_wtbl_req(struct mt76_dev *dev, struct mt76_wcid *wcid,
int cmd, void *sta_wtbl, struct sk_buff **skb);
int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid, int cmd);
+int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct ieee80211_sta *sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 state);
void mt76_connac_mcu_wtbl_ht_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv);
+ void *wtbl_tlv, bool ldpc);
void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx, void *sta_wtbl,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
struct ieee80211_ampdu_params *params,
- bool enable, bool tx);
+ int cmd, bool enable, bool tx);
int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
struct ieee80211_vif *vif,
struct mt76_wcid *wcid,
struct ieee80211_vif *vif);
u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+
+const struct ieee80211_sta_he_cap *
+mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band, struct ieee80211_sta *sta);
+
+int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
+ struct mt76_connac_sta_key_conf *sta_key_conf,
+ struct ieee80211_key_conf *key, int mcu_cmd,
+ struct mt76_wcid *wcid, enum set_key_cmd cmd);
+
+void mt76_connac_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt76_vif *mvif);
+void mt76_connac_mcu_bss_omac_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif);
+int mt76_connac_mcu_bss_basic_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct mt76_phy *phy, u8 wlan_idx,
+ bool enable);
+void mt76_connac_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt76_connac_mcu_wtbl_smps_tlv(struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ void *sta_wtbl, void *wtbl_tlv);
+int mt76_connac_mcu_set_pm(struct mt76_dev *dev, int band, int enter);
+int mt76_connac_mcu_restart(struct mt76_dev *dev);
+int mt76_connac_mcu_rdd_cmd(struct mt76_dev *dev, int cmd, u8 index,
+ u8 rx_sel, u8 val);
#endif /* __MT76_CONNAC_MCU_H */
u8 tbtt_count;
u32 tx_hang_reset;
- u8 tx_hang_check;
+ u8 tx_hang_check[4];
+ u8 beacon_hang_check;
u8 mcu_timeout;
struct mt76x02_calibration cal;
void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
{
- struct cfg80211_chan_def *chandef = &dev->mphy.chandef;
-
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- dev->mt76.region != NL80211_DFS_UNSET) {
+ if (mt76_phy_dfs_state(&dev->mphy) > MT_DFS_STATE_DISABLED) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
static void mt76x02_check_mac_err(struct mt76x02_dev *dev)
{
- u32 val = mt76_rr(dev, 0x10f4);
+ if (dev->mt76.beacon_mask) {
+ if (mt76_rr(dev, MT_TX_STA_0) & MT_TX_STA_0_BEACONS) {
+ dev->beacon_hang_check = 0;
+ return;
+ }
- if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
- return;
+ if (++dev->beacon_hang_check < 10)
+ return;
+
+ dev->beacon_hang_check = 0;
+ } else {
+ u32 val = mt76_rr(dev, 0x10f4);
+ if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5))))
+ return;
+ }
+
- dev_err(dev->mt76.dev, "mac specific condition occurred\n");
+ dev_err(dev->mt76.dev, "MAC error detected\n");
+ mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
+ mt76x02_wait_for_txrx_idle(&dev->mt76);
mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR);
udelay(10);
dev->mt76.aggr_stats[idx++] += val >> 16;
}
- if (!dev->mt76.beacon_mask)
- mt76x02_check_mac_err(dev);
+ mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
mt76x02_edcca_check(dev);
for (i = 0; i < 4; i++) {
q = dev->mphy.q_tx[i];
- if (!q->queued)
- continue;
-
prev_dma_idx = dev->mt76.tx_dma_idx[i];
dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
- if (prev_dma_idx == dma_idx)
- break;
+ if (!q->queued || prev_dma_idx != dma_idx) {
+ dev->tx_hang_check[i] = 0;
+ continue;
+ }
+
+ if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
+ return true;
}
- return i < 4;
+ return false;
}
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (test_bit(MT76_RESTART, &dev->mphy.state))
return;
- if (mt76x02_tx_hang(dev)) {
- if (++dev->tx_hang_check >= MT_TX_HANG_TH)
- goto restart;
- } else {
- dev->tx_hang_check = 0;
- }
-
- if (dev->mcu_timeout)
- goto restart;
-
- return;
+ if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
+ return;
-restart:
mt76x02_watchdog_reset(dev);
dev->tx_hang_reset++;
- dev->tx_hang_check = 0;
+ memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
memset(dev->mt76.tx_dma_idx, 0xff,
sizeof(dev->mt76.tx_dma_idx));
}
#define MT_RX_STAT_2_OVERFLOW_ERRORS GENMASK(31, 16)
#define MT_TX_STA_0 0x170c
+#define MT_TX_STA_0_BEACONS GENMASK(31, 16)
+
#define MT_TX_STA_1 0x1710
#define MT_TX_STA_2 0x1714
# SPDX-License-Identifier: ISC
config MT7915E
tristate "MediaTek MT7915E (PCIe) support"
- select MT76_CORE
+ select MT76_CONNAC_LIB
depends on MAC80211
depends on PCI
+ select RELAY
help
This adds support for MT7915-based wireless PCIe devices,
which support concurrent dual-band operation at both 5GHz
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/relay.h>
#include "mt7915.h"
#include "eeprom.h"
#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44e98caf
/** global debugfs **/
{
struct mt7915_dev *dev = data;
- return mt7915_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE, 1, 0, 0);
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
+ val, 0, 0);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
}
DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
+static int
+mt7915_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
static int
mt7915_fw_debug_wm_set(void *data, u64 val)
{
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
+ bool tx, rx, en;
int ret;
dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
- ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
+ if (dev->fw_debug_bin)
+ val = 16;
+ else
+ val = dev->fw_debug_wm;
+
+ tx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(1));
+ rx = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(2));
+ en = dev->fw_debug_wm || (dev->fw_debug_bin & BIT(0));
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
if (ret)
return ret;
for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
- ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
if (ret)
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
mt7915_fw_debug_wa_set, "%lld\n");
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7915_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw_debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7915_fw_debug_wm_set(dev, dev->fw_debug_wm);
+}
+
+static int
+mt7915_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
+ mt7915_fw_debug_bin_set, "%lld\n");
+
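The relay channel surfaces as fwlog_data in the driver's debugfs directory. A hypothetical userspace drain loop (the path below is an assumption; substitute the actual debugfs mount and phy name):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		/* illustrative path only */
		int fd = open("/sys/kernel/debug/ieee80211/phy0/mt76/fwlog_data",
			      O_RDONLY);

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* raw binary log records */
		close(fd);
		return 0;
	}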
static int
mt7915_fw_util_wm_show(struct seq_file *file, void *data)
{
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
static void
-mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
+mt7915_hw_queue_read(struct seq_file *s, u32 size,
const struct hw_queue_map *map)
{
struct mt7915_phy *phy = s->private;
struct mt7915_dev *dev = phy->dev;
u32 i, val;
- val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
for (i = 0; i < size; i++) {
u32 ctrl, head, tail, queued;
continue;
ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
- mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
- head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(11, 0));
- tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
GENMASK(27, 16));
- queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\t%s: ", map[i].name);
if (val & BIT(offs))
continue;
- mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
- qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
GENMASK(11, 0));
seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
sta->addr, msta->wcid.idx,
val, head, tail);
seq_puts(file, "PLE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
&ple_queue_map[0]);
/* iterate per-sta ple queue */
mt7915_sta_hw_queue_read, file);
/* pse queue */
seq_puts(file, "PSE non-empty queue info:\n");
- mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
+ mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
&pse_queue_map[0]);
return 0;
debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
debugfs_create_file("fw_util_wm", 0400, dir, dev,
&mt7915_fw_util_wm_fops);
debugfs_create_file("fw_util_wa", 0400, dir, dev,
&dev->hw_pattern);
debugfs_create_file("radar_trigger", 0200, dir, dev,
&fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7915_rdd_monitor);
}
+ if (!ext_phy)
+ dev->debugfs_dir = dir;
+
return 0;
}
+static void
+mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = PKT_TYPE_RX_FW_MONITOR,
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.timestamp = mt76_rr(dev, MT_LPON_FRCR(0));
+ hdr.len = *(__le16 *)data;
+ mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
#include "../dma.h"
#include "mac.h"
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc)
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
{
int i, err;
- err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, MT_TX_RING_BASE);
+ err = mt76_init_tx_queue(phy->mt76, 0, idx, n_desc, ring_base);
if (err < 0)
return err;
return 0;
}
+static void mt7915_dma_config(struct mt7915_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+ } while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ if (is_mt7915(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0, MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA, MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1, MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT, MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN, MT7915_RXQ_MCU_WA);
+ TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916, MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM, MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA, MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_EXT, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916, MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_EXT_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916, MT7916_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916, MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM, MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916, MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL, MT7915_TXQ_FWDL);
+ }
+}
+
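For illustration, RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM, MT7915_RXQ_MCU_WM) expands to roughly the following (assuming __RXQ() offsets rx queue indices past the tx/mcu ones):

	if (WFDMA1)	/* non-zero: queue lives on the second DMA engine */
		dev->wfdma_mask |= 1 << __RXQ(MT_RXQ_MCU);
	dev->q_int_mask[__RXQ(MT_RXQ_MCU)] = MT_INT_RX_DONE_WM;
	dev->q_id[__RXQ(MT_RXQ_MCU)] = MT7915_RXQ_MCU_WM;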
static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
{
-#define PREFETCH(base, depth) ((base) << 16 | (depth))
-
- mt76_wr(dev, MT_WFDMA0_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x0, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x40, 0x4));
- mt76_wr(dev, MT_WFDMA0_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x80, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING0_EXT_CTRL + ofs, PREFETCH(0x80, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING1_EXT_CTRL + ofs, PREFETCH(0xc0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING2_EXT_CTRL + ofs, PREFETCH(0x100, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING3_EXT_CTRL + ofs, PREFETCH(0x140, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING4_EXT_CTRL + ofs, PREFETCH(0x180, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING5_EXT_CTRL + ofs, PREFETCH(0x1c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING6_EXT_CTRL + ofs, PREFETCH(0x200, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING7_EXT_CTRL + ofs, PREFETCH(0x240, 0x4));
-
- mt76_wr(dev, MT_WFDMA1_TX_RING16_EXT_CTRL + ofs, PREFETCH(0x280, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING17_EXT_CTRL + ofs, PREFETCH(0x2c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING18_EXT_CTRL + ofs, PREFETCH(0x300, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING19_EXT_CTRL + ofs, PREFETCH(0x340, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING20_EXT_CTRL + ofs, PREFETCH(0x380, 0x4));
- mt76_wr(dev, MT_WFDMA1_TX_RING21_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x0));
-
- mt76_wr(dev, MT_WFDMA1_RX_RING0_EXT_CTRL + ofs, PREFETCH(0x3c0, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING1_EXT_CTRL + ofs, PREFETCH(0x400, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING2_EXT_CTRL + ofs, PREFETCH(0x440, 0x4));
- mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL + ofs, PREFETCH(0x480, 0x0));
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u32 base = 0;
+
+ /* set the SRAM prefetch base/depth for each tx/rx ring */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU) + ofs, PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MCU_WA) + ofs, PREFETCH(0x180, 0x4));
+ if (!is_mt7915(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN_WA) + ofs, PREFETCH(0x1c0, 0x4));
+ base = 0x40;
+ }
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_MAIN) + ofs, PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x240 + base, 0x4));
+
+ /* for mt7915, the ring following the last used ring
+ * must be initialized as well.
+ */
+ if (is_mt7915(&dev->mt76)) {
+ ofs += 0x4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT_WA) + ofs, PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_EXT_CTRL(MT_RXQ_EXT) + ofs, PREFETCH(0x280 + base, 0x0));
+ }
}
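PREFETCH() packs the SRAM base into bits 31..16 and the prefetch depth into bits 15..0 of each ring's EXT_CTRL word; for example PREFETCH(0x40, 0x4) == (0x40 << 16) | 0x4 == 0x00400004.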
void mt7915_dma_prefetch(struct mt7915_dev *dev)
{
__mt7915_dma_prefetch(dev, 0);
if (dev->hif2)
- __mt7915_dma_prefetch(dev, MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE);
+ __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
}
-int mt7915_dma_init(struct mt7915_dev *dev)
+static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
{
+ struct mt76_dev *mdev = &dev->mt76;
u32 hif1_ofs = 0;
- int ret;
-
- mt76_dma_attach(&dev->mt76);
if (dev->hif2)
- hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset */
+ if (rst) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
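
Note the clear-then-set pairs in the reset path above: clearing first guarantees the following set produces a 0-to-1 edge on the reset bits regardless of their previous state. A toy model of the pattern, not driver code; mt76_clear()/mt76_set() are modeled as plain bit operations on a fake register:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_rst;	/* stand-in for MT_WFDMA0_RST */

static void reg_clear(uint32_t mask) { fake_rst &= ~mask; }
static void reg_set(uint32_t mask) { fake_rst |= mask; }

int main(void)
{
	const uint32_t rst_bits = (1u << 0) | (1u << 1);

	reg_clear(rst_bits);	/* deassert first ... */
	reg_set(rst_bits);	/* ... so this is a clean reset edge */
	printf("rst=0x%08x\n", fake_rst);	/* rst=0x00000003 */
	return 0;
}
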
- /* configure global setting */
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
- MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+static int mt7915_dma_enable(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
/* reset dma idx */
mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ }
- /* configure delay interrupt */
+ /* disable delay interrupt */
mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+ }
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
+ hif1_ofs, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
+ hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
+ hif1_ofs, 0);
+ }
+ }
+
+ /* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
if (dev->hif2) {
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+ }
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ /* set WFDMA Tx/Rx */
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
- mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
- mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
- mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
}
- /* configure perfetch settings */
- mt7915_dma_prefetch(dev);
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD |
+ MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ mt7915_irq_enable(dev, irq_mask);
+
+ return 0;
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7915_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7915_dma_disable(dev, true);
/* init tx queue */
- ret = mt7915_init_tx_queues(&dev->phy, MT7915_TXQ_BAND0,
- MT7915_TX_RING_SIZE);
+ ret = mt7915_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(0),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0));
if (ret)
return ret;
/* command to WM */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7915_TXQ_MCU_WM,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
return ret;
/* command to WA */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA, MT7915_TXQ_MCU_WA,
- MT7915_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
if (ret)
return ret;
/* firmware download */
- ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7915_TXQ_FWDL,
- MT7915_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7915_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
return ret;
/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
- MT7915_RXQ_MCU_WM, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
return ret;
/* event from WA */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
- MT7915_RXQ_MCU_WA, MT7915_RX_MCU_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MCU_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
return ret;
- /* rx data queue */
+ /* rx data queue for band0 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
- MT7915_RXQ_BAND0, MT7915_RX_RING_SIZE,
- MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
if (ret)
return ret;
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ MT_RXQ_ID(MT_RXQ_MAIN_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
+ if (ret)
+ return ret;
+ }
+
if (dev->dbdc_support) {
+ /* rx data queue for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT],
- MT7915_RXQ_BAND1, MT7915_RX_RING_SIZE,
+ MT_RXQ_ID(MT_RXQ_EXT),
+ MT7915_RX_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_DATA_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT) + hif1_ofs);
if (ret)
return ret;
- /* event from WA */
+ /* tx free notify event from WA for band1 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_EXT_WA],
- MT7915_RXQ_MCU_WA_EXT,
+ MT_RXQ_ID(MT_RXQ_EXT_WA),
MT7915_RX_MCU_RING_SIZE,
MT_RX_BUF_SIZE,
- MT_RX_EVENT_RING_BASE + hif1_ofs);
+ MT_RXQ_RING_BASE(MT_RXQ_EXT_WA) + hif1_ofs);
if (ret)
return ret;
}
mt7915_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
- /* hif wait WFDMA idle */
- mt76_set(dev, MT_WFDMA0_BUSY_ENA,
- MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_BUSY_ENA,
- MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA0_PCIE1_BUSY_ENA,
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_set(dev, MT_WFDMA1_PCIE1_BUSY_ENA,
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
- MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
- MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
-
- mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
- MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
-
- /* set WFDMA Tx/Rx */
- mt76_set(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_set(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- if (dev->hif2) {
- mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
- (MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
- (MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN));
- mt76_set(dev, MT_WFDMA_HOST_CONFIG,
- MT_WFDMA_HOST_CONFIG_PDMA_BAND);
- }
-
- /* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
- MT_INT_MCU_CMD);
+ mt7915_dma_enable(dev);
return 0;
}
void mt7915_dma_cleanup(struct mt7915_dev *dev)
{
- /* disable */
- mt76_clear(dev, MT_WFDMA0_GLO_CFG,
- MT_WFDMA0_GLO_CFG_TX_DMA_EN |
- MT_WFDMA0_GLO_CFG_RX_DMA_EN);
- mt76_clear(dev, MT_WFDMA1_GLO_CFG,
- MT_WFDMA1_GLO_CFG_TX_DMA_EN |
- MT_WFDMA1_GLO_CFG_RX_DMA_EN);
-
- /* reset */
- mt76_clear(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA1_RST,
- MT_WFDMA1_RST_DMASHDL_ALL_RST |
- MT_WFDMA1_RST_LOGIC_RST);
-
- mt76_clear(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
-
- mt76_set(dev, MT_WFDMA0_RST,
- MT_WFDMA0_RST_DMASHDL_ALL_RST |
- MT_WFDMA0_RST_LOGIC_RST);
+ mt7915_dma_disable(dev, true);
mt76_dma_cleanup(&dev->mt76);
}
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ u32 offs;
if (!dev->flash_mode)
return 0;
if (!dev->cal)
return -ENOMEM;
- return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val);
+ offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
switch (val) {
case 0x7915:
+ case 0x7916:
return 0;
default:
return -EINVAL;
if (dev->dbdc_support)
default_bin = MT7915_EEPROM_DEFAULT_DBDC;
+ if (!is_mt7915(&dev->mt76))
+ default_bin = MT7916_EEPROM_DEFAULT;
+
ret = request_firmware(&fw, default_bin, dev->mt76.dev);
if (ret)
return ret;
goto out;
}
- memcpy(eeprom, fw->data, MT7915_EEPROM_SIZE);
+ memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
dev->flash_mode = true;
out:
static int mt7915_eeprom_load(struct mt7915_dev *dev)
{
int ret;
+ u16 eeprom_size = mt7915_eeprom_size(dev);
- ret = mt76_eeprom_init(&dev->mt76, MT7915_EEPROM_SIZE);
+ ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
if (ret < 0)
return ret;
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7915_EEPROM_SIZE,
+ block_num = DIV_ROUND_UP(eeprom_size,
MT7915_EEPROM_BLOCK_SIZE);
for (i = 0; i < block_num; i++)
mt7915_mcu_get_eeprom(dev,
return mt7915_check_eeprom(dev);
}
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
}
}
-static void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev)
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
{
u8 nss, nss_band, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+ bool ext_phy = phy != &dev->phy;
- mt7915_eeprom_parse_band_config(&dev->phy);
+ mt7915_eeprom_parse_band_config(phy);
+
+ /* read tx/rx mask from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+ nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + ext_phy]);
+ }
- /* read tx mask from eeprom */
- nss = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH, eeprom[MT_EE_WIFI_CONF]);
if (!nss || nss > 4)
nss = 4;
+ /* read tx/rx stream */
nss_band = nss;
-
if (dev->dbdc_support) {
- nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
- eeprom[MT_EE_WIFI_CONF + 3]);
+ if (is_mt7915(&dev->mt76)) {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (ext_phy)
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+ nss_band = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + ext_phy]);
+ }
+
if (!nss_band || nss_band > 2)
nss_band = 2;
+ }
- if (nss_band >= nss)
- nss = 4;
+ if (nss_band > nss) {
+ dev_err(dev->mt76.dev,
+ "nss mismatch, nss(%d) nss_band(%d) ext_phy(%d)\n",
+ nss, nss_band, ext_phy);
+ nss = nss_band;
}
- dev->chainmask = BIT(nss) - 1;
- dev->mphy.antenna_mask = BIT(nss_band) - 1;
- dev->mphy.chainmask = dev->mphy.antenna_mask;
+ mphy->chainmask = ext_phy ? (BIT(nss_band) - 1) << 2 : (BIT(nss_band) - 1);
+ mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
+ dev->chainmask |= mphy->chainmask;
}
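
To illustrate the chainmask/antenna_mask derivation above for a DBDC setup with two streams per band: the secondary PHY's chains sit in the upper bit positions, hence the shift by 2. A standalone sketch, not driver code; __builtin_popcount() stands in for the kernel's hweight8():

#include <stdio.h>

int main(void)
{
	unsigned int nss_band = 2;
	unsigned int main_chainmask = (1u << nss_band) - 1;	   /* 0x3 */
	unsigned int ext_chainmask = ((1u << nss_band) - 1) << 2; /* 0xc */
	unsigned int ext_antenna_mask =
		(1u << __builtin_popcount(ext_chainmask)) - 1;	   /* 0x3 */

	printf("main chainmask 0x%x, ext chainmask 0x%x\n",
	       main_chainmask, ext_chainmask);
	printf("ext antenna_mask 0x%x, dev chainmask 0x%x\n",
	       ext_antenna_mask, main_chainmask | ext_chainmask);
	return 0;
}
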
int mt7915_eeprom_init(struct mt7915_dev *dev)
if (ret)
return ret;
- mt7915_eeprom_parse_hw_cap(dev);
+ mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
tssi_on = mt7915_tssi_enabled(dev, chan->band);
if (chan->band == NL80211_BAND_2GHZ) {
- index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ u32 power = is_mt7915(&dev->mt76) ?
+ MT_EE_TX0_POWER_2G : MT_EE_TX0_POWER_2G_V2;
+
+ index = power + chain_idx * 3;
target_power = eeprom[index];
if (!tssi_on)
target_power += eeprom[index + 1];
} else {
int group = mt7915_get_channel_group(chan->hw_value);
+ u32 power = is_mt7915(&dev->mt76) ?
+ MT_EE_TX0_POWER_5G : MT_EE_TX0_POWER_5G_V2;
- index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ index = power + chain_idx * 12;
target_power = eeprom[index + group];
if (!tssi_on)
u8 *eeprom = dev->mt76.eeprom.data;
u32 val;
s8 delta;
+ u32 rate_2g, rate_5g;
+
+ rate_2g = is_mt7915(&dev->mt76) ?
+ MT_EE_RATE_DELTA_2G : MT_EE_RATE_DELTA_2G_V2;
+
+ rate_5g = is_mt7915(&dev->mt76) ?
+ MT_EE_RATE_DELTA_5G : MT_EE_RATE_DELTA_5G_V2;
if (band == NL80211_BAND_2GHZ)
- val = eeprom[MT_EE_RATE_DELTA_2G];
+ val = eeprom[rate_2g];
else
- val = eeprom[MT_EE_RATE_DELTA_5G];
+ val = eeprom[rate_5g];
if (!(val & MT_EE_RATE_DELTA_EN))
return 0;
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
+ MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+ MT_EE_TX0_POWER_2G_V2 = 0x441,
+ MT_EE_TX0_POWER_5G_V2 = 0x445,
MT_EE_ADIE_FT_VERSION = 0x9a0,
__MT_EE_MAX = 0xe00,
+ __MT_EE_MAX_V2 = 0x1000,
/* 0xe10 ~ 0x5780 used to save group cal data */
- MT_EE_PRECAL = 0xe10
+ MT_EE_PRECAL = 0xe10,
+ MT_EE_PRECAL_V2 = 0x1010
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt76_phy *mphy = hw->priv;
struct mt7915_phy *phy = mphy->priv;
- struct cfg80211_chan_def *chandef = &mphy->chandef;
memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
dev->mt76.region = request->dfs_region;
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7915_mcu_rdd_background_enable(phy, NULL);
+
mt7915_init_txpower(dev, &mphy->sband_2g.sband);
mt7915_init_txpower(dev, &mphy->sband_5g.sband);
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
+ mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
mt7915_dfs_init_radar_detector(phy);
}
mt7915_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ struct mt7915_dev *dev = phy->dev;
hw->queues = 4;
hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
phy->mt76->sband_5g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
- IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (is_mt7915(&dev->mt76)) {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ } else {
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ /* mt7916 dbdc: 2 GHz 2x2 bw40 and 5 GHz 2x2 contiguous bw160 */
+ phy->mt76->sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
}
mt76_set_stream_caps(phy->mt76, true);
mt7915_set_stream_vht_txbf_caps(phy);
mt7915_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
}
static void
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
- /* disable rx rate report by default due to hw issues */
+
+ /* mt7915: disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
+ u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
+
+ /* config pse qid6 wfdma port selection */
+ if (!is_mt7915(&dev->mt76) && dev->hif2)
+ mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
+ MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
- mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
- for (i = 0; i < MT7915_WTBL_SIZE; i++)
+ for (i = 0; i < mt7915_wtbl_size(dev); i++)
mt7915_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
phy = mphy->priv;
phy->dev = dev;
phy->mt76 = mphy;
- mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
- mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
- mt7915_eeprom_parse_band_config(phy);
- mt7915_init_wiphy(mphy->hw);
+ mt7915_eeprom_parse_hw_cap(dev, phy);
memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
ETH_ALEN);
+ /* Make the secondary PHY MAC address locally administered, without
+ * overlapping the usual MAC address allocation scheme used for
+ * multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ }
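
For clarity on the fixup above: the |= 2 sets the locally administered bit of the first octet, and the BIT(7) flip moves the result out of the range mac80211 typically derives for additional virtual interfaces. A standalone sketch with a made-up example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x0c, 0x43, 0x12, 0x34, 0x56 };

	addr[0] |= 2;		/* mark as locally administered */
	addr[0] ^= 1u << 7;	/* steer clear of per-vif derived addresses */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	/* prints 82:0c:43:12:34:56 */
	return 0;
}
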
mt76_eeprom_override(mphy);
- ret = mt7915_init_tx_queues(phy, MT7915_TXQ_BAND1,
- MT7915_TX_RING_SIZE);
+ /* init wiphy according to mphy and phy */
+ mt7915_init_wiphy(mphy->hw);
+ ret = mt7915_init_tx_queues(phy, MT_TXQ_ID(1),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
if (ret)
goto error;
static void mt7915_wfsys_reset(struct mt7915_dev *dev)
{
- u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
-
#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
- mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+ if (is_mt7915(&dev->mt76)) {
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
- /* change to software control */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
- /* reset wfsys */
- val &= ~MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* release wfsys then mcu re-excutes romcode */
- val |= MT_TOP_PWR_SW_RST;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* switch to hw control */
- val &= ~MT_TOP_PWR_SW_RST;
- val |= MT_TOP_PWR_HW_CTRL;
- mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+ /* release wfsys, then the mcu re-executes romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* check whether mcu resets to default */
- if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_DEFAULT,
- MT_MCU_DUMMY_DEFAULT, 1000)) {
- dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
- return;
- }
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
- /* wfsys reset won't clear host registers */
- mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+ /* check whether mcu resets to default */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
+ MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
+ 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
- msleep(100);
+ msleep(100);
+ } else {
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+ }
}
static int mt7915_init_hardware(struct mt7915_dev *dev)
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
INIT_WORK(&dev->init_work, mt7915_init_work);
- dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5));
+
+ dev->dbdc_support = is_mt7915(&dev->mt76) ?
+ !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
/* If MCU was already running, it is likely in a bad state */
if (mt76_get_field(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE) >
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
- /*
- * force firmware operation mode into normal state,
- * which should be set before firmware download stage.
- */
- mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
-
ret = mt7915_mcu_init(dev);
if (ret) {
/* Reset and try again */
if (ret < 0)
return ret;
-
if (dev->flash_mode) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
mt7915_init_wiphy(hw);
- if (!dev->dbdc_support)
- dev->mphy.sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
-
- dev->mphy.hw->wiphy->available_antennas_rx = dev->mphy.chainmask;
- dev->mphy.hw->wiphy->available_antennas_tx = dev->mphy.chainmask;
- dev->phy.dfs_state = -1;
-
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7915_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+ HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
static int mt7915_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7915_sta *msta = (struct mt7915_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
return 0;
}
+static int
+mt7915_mac_fill_rx_rate(struct mt7915_dev *dev,
+ struct mt76_rx_status *status,
+ struct ieee80211_supported_band *sband,
+ __le32 *rxv)
+{
+ u32 v0, v2;
+ u8 stbc, gi, bw, dcm, mode, nss;
+ int i, idx;
+ bool cck = false;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v2 = le32_to_cpu(rxv[2]);
+
+ idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
+ nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
+
+ if (!is_mt7915(&dev->mt76)) {
+ stbc = FIELD_GET(MT_PRXV_HT_STBC, v0);
+ gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v0);
+ mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
+ dcm = FIELD_GET(MT_PRXV_DCM, v0);
+ bw = FIELD_GET(MT_PRXV_FRAME_MODE, v0);
+ } else {
+ stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
+ gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
+ mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
+ dcm = !!(idx & GENMASK(3, 0) & MT_PRXV_TX_DCM);
+ bw = FIELD_GET(MT_CRXV_FRAME_MODE, v2);
+ }
+
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ i = mt76_get_rate(&dev->mt76, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = nss;
+ status->encoding = RX_ENC_VHT;
+ if (gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (i > 9)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_HE_MU:
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ i &= GENMASK(3, 0);
+
+ if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
+ status->he_gi = gi;
+
+ status->he_dcm = dcm;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (bw) {
+ case IEEE80211_STA_RX_BW_20:
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ if (mode & MT_PHY_TYPE_HE_EXT_SU &&
+ (idx & MT_PRXV_TX_ER_SU_106T)) {
+ status->bw = RATE_INFO_BW_HE_RU;
+ status->he_ru =
+ NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ } else {
+ status->bw = RATE_INFO_BW_40;
+ }
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+ if (mode < MT_PHY_TYPE_HE_SU && gi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ return 0;
+}
+
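The FIELD_GET() calls in mt7915_mac_fill_rx_rate() slice fixed bit ranges out of the P-RXV/C-RXV words. A minimal user-space sketch of the same extraction, with a hand-rolled field_get() in place of the kernel macro; the mask values and the sample word below are illustrative, not the driver's actual layout:

#include <stdint.h>
#include <stdio.h>

/* poor man's FIELD_GET(): mask must cover one contiguous bit range */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

#define RXV_TX_RATE	0x0000007fu	/* like GENMASK(6, 0) */
#define RXV_NSTS	0x00000780u	/* like GENMASK(10, 7) */
#define RXV_TX_MODE	0x0f000000u	/* like GENMASK(27, 24) */

int main(void)
{
	uint32_t v0 = (0x8u << 24) | (0x1u << 7) | 0x7;	/* fabricated */

	printf("rate=%u nsts=%u mode=%u\n",
	       field_get(RXV_TX_RATE, v0),
	       field_get(RXV_NSTS, v0),
	       field_get(RXV_TX_MODE, v0));	/* rate=7 nsts=1 mode=8 */
	return 0;
}
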
static int
mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
{
/* RXD Group 3 - P-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
- u32 v0, v1, v2;
+ u32 v0, v1;
+ int ret;
rxv = rxd;
rxd += 2;
v0 = le32_to_cpu(rxv[0]);
v1 = le32_to_cpu(rxv[1]);
- v2 = le32_to_cpu(rxv[2]);
if (v0 & MT_PRXV_HT_AD_CODE)
status->enc_flags |= RX_ENC_FLAG_LDPC;
/* RXD Group 5 - C-RXV */
if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
- u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
- u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
- bool cck = false;
-
rxd += 18;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
+ }
- idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
- mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
-
- switch (mode) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- i = mt76_get_rate(&dev->mt76, sband, i, cck);
- break;
- case MT_PHY_TYPE_HT_GF:
- case MT_PHY_TYPE_HT:
- status->encoding = RX_ENC_HT;
- if (i > 31)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_VHT:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_VHT;
- if (i > 9)
- return -EINVAL;
- break;
- case MT_PHY_TYPE_HE_MU:
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- status->nss =
- FIELD_GET(MT_PRXV_NSTS, v0) + 1;
- status->encoding = RX_ENC_HE;
- i &= GENMASK(3, 0);
-
- if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
- status->he_gi = gi;
-
- status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
- break;
- default:
- return -EINVAL;
- }
- status->rate_idx = i;
-
- switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
- case IEEE80211_STA_RX_BW_20:
- break;
- case IEEE80211_STA_RX_BW_40:
- if (mode & MT_PHY_TYPE_HE_EXT_SU &&
- (idx & MT_PRXV_TX_ER_SU_106T)) {
- status->bw = RATE_INFO_BW_HE_RU;
- status->he_ru =
- NL80211_RATE_INFO_HE_RU_ALLOC_106;
- } else {
- status->bw = RATE_INFO_BW_40;
- }
- break;
- case IEEE80211_STA_RX_BW_80:
- status->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_160:
- status->bw = RATE_INFO_BW_160;
- break;
- default:
- return -EINVAL;
- }
-
- status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
- if (mode < MT_PHY_TYPE_HE_SU && gi)
- status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (!is_mt7915(&dev->mt76) ||
+ (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
+ ret = mt7915_mac_fill_rx_rate(dev, status, sband, rxv);
+ if (ret < 0)
+ return ret;
}
}
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
+ /* drop no-data frames */
+ if (fc & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))
+ return -EINVAL;
+
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(fc);
status->qos_ctl = qos_ctl;
} else {
p_fmt = MT_TX_TYPE_CT;
q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
- mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
LIST_HEAD(free_list);
struct sk_buff *skb, *tmp;
void *end = data + len;
- u8 i, count;
- bool wake = false;
+ bool v3, wake = false;
+ u16 total, count = 0;
+ u32 txd = le32_to_cpu(free->txd);
+ u32 *cur_info;
/* clean DMA queues and unmap buffers first */
mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
* to the time ack is received or dropped by hw (air + hw queue time).
* Should avoid accessing WTBL to get Tx airtime, and use it instead.
*/
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ total = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+ if (WARN_ON_ONCE((void *)&free->info[total >> v3] > end))
return;
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
+ for (cur_info = &free->info[0]; count < total; cur_info++) {
+ u32 msdu, info = le32_to_cpu(*cur_info);
+ u8 i;
/*
* 1'b1: new wcid pair.
struct mt76_wcid *wcid;
u16 idx;
- count++;
idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
wcid = rcu_dereference(dev->mt76.wcid[idx]);
sta = wcid_to_sta(wcid);
continue;
}
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER))
continue;
- mt7915_txwi_free(dev, txwi, sta, &free_list);
+ for (i = 0; i < 1 + v3; i++) {
+ if (v3) {
+ msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
+ if (msdu == MT_TX_FREE_MSDU_ID_V3)
+ continue;
+ } else {
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ }
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7915_txwi_free(dev, txwi, sta, &free_list);
+ }
}
mt7915_mac_sta_poll(dev);
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
if (pid < MT_PACKET_ID_FIRST)
return;
- if (wcidx >= MT7915_WTBL_SIZE)
+ if (wcidx >= mt7915_wtbl_size(dev))
return;
rcu_read_lock();
for (rxd += 2; rxd + 8 <= end; rxd += 8)
mt7915_mac_add_txs(dev, rxd);
return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, data, len);
+ return false;
default:
return true;
}
mt7915_mac_add_txs(dev, rxd);
dev_kfree_skb(skb);
break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+ break;
case PKT_TYPE_NORMAL:
if (!mt7915_mac_fill_rx(dev, skb)) {
mt76_rx(&dev->mt76, q, skb);
mt7915_dma_reset(struct mt7915_dev *dev)
{
struct mt76_phy *mphy_ext = dev->mt76.phy2;
- u32 hif1_ofs = MT_WFDMA1_PCIE1_BASE - MT_WFDMA1_BASE;
+ u32 hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
int i;
mt76_clear(dev, MT_WFDMA0_GLO_CFG,
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
int i, aggr0, aggr1, cnt;
+ u32 val;
- mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
- mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
+ mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
- mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
+ mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
mib->rx_ampdu_cnt += cnt;
mib->rx_ampdu_bytes_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
- mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
+ mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
- mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
+ mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
- mib->rx_vec_queue_overflow_drop_cnt +=
- FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(ext_phy));
+ mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
mib->rx_ba_cnt += cnt;
cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
- mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
+ if (is_mt7915(&dev->mt76))
+ cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
+ mib->tx_pkt_ibf_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916, cnt);
- cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(ext_phy));
mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
}
aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
- for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val;
-
- val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
- mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
- mib->ack_fail_cnt +=
- FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
-
- val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
- mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
- mib->rts_retries_cnt +=
- FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
-
- val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
- dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr0++] += val >> 16;
+ if (is_mt7915(&dev->mt76)) {
+ for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 4)));
+ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 4)));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr0++] += val >> 16;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
+ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
+ dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ /* rts count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, (i << 2)));
+ mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* rts retry count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, (i << 2)));
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ba miss count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(ext_phy, (i << 2)));
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ack fail count */
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(ext_phy, (i << 2)));
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
+ }
- val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
- dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
- dev->mt76.aggr_stats[aggr1++] += val >> 16;
+ for (i = 0; i < 8; i++) {
+ val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ dev->mt76.aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ }
}
}
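
Several of the mt7916 MIB reads above pack two 16-bit event counts into one 32-bit register, which is why each read feeds two FIELD_GET(GENMASK(...)) accumulations. A standalone sketch of that pattern, with a fabricated register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = (7u << 16) | 5u;	/* two packed 16-bit counters */
	uint32_t rts_cnt = 0;

	rts_cnt += reg & 0xffff;	/* low half:  5 */
	rts_cnt += reg >> 16;		/* high half: 7 */
	printf("rts_cnt=%u\n", rts_cnt);	/* rts_cnt=12 */
	return 0;
}
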
struct mt7915_dev *dev = phy->dev;
if (phy->rdd_state & BIT(0))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
if (phy->rdd_state & BIT(1))
- mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
}
static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
{
int err;
- err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
- return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
}
static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
int err;
/* start CAC */
- err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
+ enum mt76_dfs_state dfs_state, prev_state;
int err;
- if (dev->mt76.region == NL80211_DFS_UNSET) {
- phy->dfs_state = -1;
- if (phy->rdd_state)
- goto stop;
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+ if (prev_state == dfs_state)
return 0;
- }
- if (test_bit(MT76_SCANNING, &phy->mt76->state))
- return 0;
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7915_dfs_stop_radar_detector(phy);
- if (phy->dfs_state == chandef->chan->dfs_state)
- return 0;
-
- err = mt7915_dfs_init_radar_specs(phy);
- if (err < 0) {
- phy->dfs_state = -1;
+ if (dfs_state == MT_DFS_STATE_DISABLED)
goto stop;
- }
- phy->dfs_state = chandef->chan->dfs_state;
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
- if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
- if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
- return mt7915_dfs_start_radar_detector(phy);
+ err = mt7915_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
- return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
- MT_RX_SEL0, 0);
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
}
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ ext_phy, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
+ }
+
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
stop:
- err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
- MT_RX_SEL0, 0);
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START, ext_phy,
+ MT_RX_SEL0, 0);
if (err < 0)
return err;
mt7915_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
return 0;
}
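
The rework above makes the DFS flow an explicit state machine: UNKNOWN or DISABLED moves to CAC once the radar detector is started, and CAC moves to ACTIVE after RDD_CAC_END. A compact sketch of those transition rules, with hypothetical enum values and a hypothetical helper standing in for the mt76 ones:

#include <stdio.h>

enum dfs_state { DFS_UNKNOWN, DFS_DISABLED, DFS_CAC, DFS_ACTIVE };

/* hypothetical helper mirroring the transitions above */
static enum dfs_state dfs_step(enum dfs_state prev, int radar_required)
{
	if (!radar_required)
		return DFS_DISABLED;	/* stop detector, normal start */
	if (prev <= DFS_DISABLED)
		return DFS_CAC;		/* init specs, start detector */
	return DFS_ACTIVE;		/* CAC ended, channel usable */
}

int main(void)
{
	enum dfs_state s = DFS_UNKNOWN;

	s = dfs_step(s, 1);	/* -> DFS_CAC */
	s = dfs_step(s, 1);	/* -> DFS_ACTIVE */
	s = dfs_step(s, 0);	/* -> DFS_DISABLED */
	printf("final state=%d\n", s);	/* final state=1 */
	return 0;
}
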
PKT_TYPE_RETRIEVE,
PKT_TYPE_TXRX_NOTIFY,
PKT_TYPE_RX_EVENT,
+ PKT_TYPE_RX_FW_MONITOR = 0x0c,
};
/* RXD DW1 */
#define MT_PRXV_RCPI2 GENMASK(23, 16)
#define MT_PRXV_RCPI1 GENMASK(15, 8)
#define MT_PRXV_RCPI0 GENMASK(7, 0)
+#define MT_PRXV_HT_SHORT_GI GENMASK(16, 15)
+#define MT_PRXV_HT_STBC GENMASK(23, 22)
+#define MT_PRXV_TX_MODE GENMASK(27, 24)
+#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
+#define MT_PRXV_DCM BIT(17)
+#define MT_PRXV_NUM_RX GENMASK(20, 18)
/* C-RXV */
#define MT_CRXV_HT_STBC GENMASK(1, 0)
struct mt7915_tx_free {
__le16 rx_byte_cnt;
__le16 ctrl;
- u8 txd_cnt;
- u8 rsv[3];
+ __le32 txd;
__le32 info[];
} __packed __aligned(4);
+#define MT_TX_FREE_VER GENMASK(18, 16)
#define MT_TX_FREE_MSDU_CNT GENMASK(9, 0)
#define MT_TX_FREE_WLAN_ID GENMASK(23, 14)
#define MT_TX_FREE_LATENCY GENMASK(12, 0)
/* 0: success, others: dropped */
-#define MT_TX_FREE_STATUS GENMASK(14, 13)
#define MT_TX_FREE_MSDU_ID GENMASK(30, 16)
#define MT_TX_FREE_PAIR BIT(31)
+#define MT_TX_FREE_MPDU_HEADER BIT(30)
+#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
+
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
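
On the v3 (MT_TX_FREE_VER == 0x4) layout described by the defines above, each 32-bit info word can carry two 15-bit MSDU tokens at bit offsets 0 and 15, with an all-ones field (0x7fff) marking an empty slot. A standalone sketch of the unpacking done in the tx-free path, using a fabricated info word:

#include <stdint.h>
#include <stdio.h>

#define MSDU_ID_V3	0x7fffu		/* GENMASK(14, 0) */

int main(void)
{
	/* slot 1 empty (all ones), slot 0 holds token 0x0123 */
	uint32_t info = (MSDU_ID_V3 << 15) | 0x0123;
	int i;

	for (i = 0; i < 2; i++) {
		uint32_t msdu = (info >> (15 * i)) & MSDU_ID_V3;

		if (msdu == MSDU_ID_V3)
			continue;	/* no MSDU in this slot */
		printf("slot %d: token 0x%04x\n", i, msdu);
	}
	return 0;
}
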
running = mt7915_dev_running(dev);
if (!running) {
- ret = mt7915_mcu_set_pm(dev, 0, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 0, 0);
if (ret)
goto out;
}
if (phy != &dev->phy) {
- ret = mt7915_mcu_set_pm(dev, 1, 0);
+ ret = mt76_connac_mcu_set_pm(&dev->mt76, 1, 0);
if (ret)
goto out;
mt7915_mac_enable_nf(dev, 1);
}
- ret = mt7915_mcu_set_rts_thresh(phy, 0x92b);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ phy != &dev->phy);
if (ret)
goto out;
clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
if (phy != &dev->phy) {
- mt7915_mcu_set_pm(dev, 1, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 1, 1);
mt7915_mcu_set_mac(dev, 1, false, false);
}
if (!mt7915_dev_running(dev)) {
- mt7915_mcu_set_pm(dev, 0, 1);
+ mt76_connac_mcu_set_pm(&dev->mt76, 0, 1);
mt7915_mcu_set_mac(dev, 0, false, false);
}
mt7915_init_bitrate_mask(vif);
memset(&mvif->cap, -1, sizeof(mvif->cap));
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+
out:
mutex_unlock(&dev->mt76.mutex);
mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
-static void mt7915_init_dfs_state(struct mt7915_phy *phy)
-{
- struct mt76_phy *mphy = phy->mt76;
- struct ieee80211_hw *hw = mphy->hw;
- struct cfg80211_chan_def *chandef = &hw->conf.chandef;
-
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- return;
-
- if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
- return;
-
- if (mphy->chandef.chan->center_freq == chandef->chan->center_freq &&
- mphy->chandef.width == chandef->width)
- return;
-
- phy->dfs_state = -1;
-}
-
int mt7915_set_channel(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &phy->mt76->state);
- mt7915_init_dfs_state(phy);
mt76_set_channel(phy->mt76);
if (dev->flash_mode) {
struct ieee80211_key_conf *key)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
&mvif->sta;
mutex_lock(&dev->mt76.mutex);
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ }
+
if (cmd == SET_KEY)
*wcid_keyidx = idx;
else if (idx == *wcid_keyidx)
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7915_mcu_add_key(dev, vif, msta, key, cmd);
-
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_EXT_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
out:
mutex_unlock(&dev->mt76.mutex);
mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7915_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
int ret;
mutex_lock(&dev->mt76.mutex);
- ret = mt7915_mcu_set_rts_thresh(phy, val);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val, phy != &dev->phy);
mutex_unlock(&dev->mt76.mutex);
return ret;
n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
: mvif->mt76.omac_idx;
/* TSF software read */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_READ);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software overwrite */
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_WRITE);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
mutex_unlock(&dev->mt76.mutex);
}
mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
/* TSF software adjust*/
- mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
- MT_LPON_TCR_SW_ADJUST);
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
mutex_unlock(&dev->mt76.mutex);
}
struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
- if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ if (is_mt7915(&phy->dev->mt76) &&
+ !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
sinfo->rxrate = rxrate;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
else
clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
else
clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
}
static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
mutex_unlock(&dev->mt76.mutex);
}
+static int
+mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7915_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7915_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
#ifdef CONFIG_MAC80211_DEBUGFS
.sta_add_debugfs = mt7915_sta_add_debugfs,
#endif
+ .set_radar_background = mt7915_set_radar_background,
};
#define MCU_PATCH_ADDRESS 0x200000
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
-#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
-
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
-static enum mcu_cipher_type
-mt7915_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7915_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
-static const struct ieee80211_sta_he_cap *
-mt7915_get_he_phy_cap(struct mt7915_phy *phy, struct ieee80211_vif *vif)
-{
- struct ieee80211_supported_band *sband;
- enum nl80211_band band;
-
- band = phy->mt76->chandef.chan->band;
- sband = phy->mt76->hw->wiphy->bands[band];
-
- return ieee80211_get_he_iftype_cap(sband, vif->type);
-}
-
-static u8
-mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
- struct ieee80211_sta_ht_cap *ht_cap;
- struct ieee80211_sta_vht_cap *vht_cap;
- const struct ieee80211_sta_he_cap *he_cap;
- u8 mode = 0;
-
- if (sta) {
- ht_cap = &sta->ht_cap;
- vht_cap = &sta->vht_cap;
- he_cap = &sta->he_cap;
- } else {
- struct ieee80211_supported_band *sband;
-
- sband = mvif->phy->mt76->hw->wiphy->bands[band];
-
- ht_cap = &sband->ht_cap;
- vht_cap = &sband->vht_cap;
- he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
- }
-
- if (band == NL80211_BAND_2GHZ) {
- mode |= PHY_MODE_B | PHY_MODE_G;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_GN;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ) {
- mode |= PHY_MODE_A;
-
- if (ht_cap->ht_supported)
- mode |= PHY_MODE_AN;
-
- if (vht_cap->vht_supported)
- mode |= PHY_MODE_AC;
-
- if (he_cap && he_cap->has_he)
- mode |= PHY_MODE_AX_5G;
- }
-
- return mode;
-}
-
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
static void
mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
- const u16 *mask)
+ u16 mcs_map)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct cfg80211_chan_def *chandef = &msta->vif->phy->mt76->chandef;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
int nss, max_nss = sta->rx_nss > 3 ? 4 : sta->rx_nss;
- u16 mcs_map;
-
- switch (chandef->width) {
- case NL80211_CHAN_WIDTH_80P80:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80p80);
- break;
- case NL80211_CHAN_WIDTH_160:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- break;
- default:
- mcs_map = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
- break;
- }
for (nss = 0; nss < max_nss; nss++) {
int mcs;
if (r->band_idx && dev->mt76.phy2)
mphy = dev->mt76.phy2;
- ieee80211_radar_detected(mphy->hw);
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
dev->hw_pattern++;
}
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
const char *data = (char *)&rxd[1];
const char *type;
+ int len = skb->len - sizeof(*rxd);
switch (rxd->s2d_index) {
case 0:
+ if (mt7915_debugfs_rx_log(dev, data, len))
+ return;
+
type = "WM";
break;
case 2:
break;
}
- wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type,
- (int)(skb->len - sizeof(*rxd)), data);
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
}
static void
mt76_mcu_rx_event(&dev->mt76, skb);
}
-static struct sk_buff *
-mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
- struct mt7915_sta *msta, int len)
-{
- struct sta_req_hdr hdr = {
- .bss_idx = mvif->mt76.idx,
- .wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
- .wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta && msta->wcid.sta ? mvif->mt76.omac_idx : 0xe,
- .is_tlv_append = 1,
- };
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, len);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- skb_put_data(skb, &hdr, sizeof(hdr));
-
- return skb;
-}
-
-static struct wtbl_req_hdr *
-mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
- int cmd, void *sta_wtbl, struct sk_buff **skb)
-{
- struct tlv *sta_hdr = sta_wtbl;
- struct wtbl_req_hdr hdr = {
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
- .operation = cmd,
- };
- struct sk_buff *nskb = *skb;
-
- if (!nskb) {
- nskb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!nskb)
- return ERR_PTR(-ENOMEM);
-
- *skb = nskb;
- }
-
- if (sta_hdr)
- le16_add_cpu(&sta_hdr->len, sizeof(hdr));
-
- return skb_put_data(nskb, &hdr, sizeof(hdr));
-}
-
-static struct tlv *
-mt7915_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
- void *sta_ntlv, void *sta_wtbl)
-{
- struct sta_ntlv_hdr *ntlv_hdr = sta_ntlv;
- struct tlv *sta_hdr = sta_wtbl;
- struct tlv *ptlv, tlv = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(len),
- };
- u16 ntlv;
-
- ptlv = skb_put(skb, len);
- memcpy(ptlv, &tlv, sizeof(tlv));
-
- ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
- ntlv_hdr->tlv_num = cpu_to_le16(ntlv + 1);
-
- if (sta_hdr) {
- u16 size = le16_to_cpu(sta_hdr->len);
-
- sta_hdr->len = cpu_to_le16(size + len);
- }
-
- return ptlv;
-}
-
-static struct tlv *
-mt7915_mcu_add_tlv(struct sk_buff *skb, int tag, int len)
-{
- return mt7915_mcu_add_nested_tlv(skb, tag, len, skb->data, NULL);
-}
-
static struct tlv *
mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
__le16 *sub_ntlv, __le16 *len)
}
/** bss info **/
-static int
-mt7915_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct mt7915_phy *phy, bool enable)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_basic *bss;
- u16 wlan_idx = mvif->sta.wcid.idx;
- u32 type = NETWORK_INFRA;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BASIC, sizeof(*bss));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MONITOR:
- break;
- case NL80211_IFTYPE_STATION:
- /* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
- if (enable) {
- struct ieee80211_sta *sta;
- struct mt7915_sta *msta;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
- if (!sta) {
- rcu_read_unlock();
- return -EINVAL;
- }
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- wlan_idx = msta->wcid.idx;
- rcu_read_unlock();
- }
- break;
- case NL80211_IFTYPE_ADHOC:
- type = NETWORK_IBSS;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- bss = (struct bss_info_basic *)tlv;
- bss->network_type = cpu_to_le32(type);
- bss->bmc_wcid_lo = to_wcid_lo(wlan_idx);
- bss->bmc_wcid_hi = to_wcid_hi(wlan_idx);
- bss->wmm_idx = mvif->mt76.wmm_idx;
- bss->active = enable;
-
- if (vif->type != NL80211_IFTYPE_MONITOR) {
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- bss->dtim_period = vif->bss_conf.dtim_period;
- bss->phy_mode = mt7915_get_phy_mode(vif, NULL);
- } else {
- memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
- }
-
- return 0;
-}
-
-static void
-mt7915_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct bss_info_omac *omac;
- struct tlv *tlv;
- u32 type = 0;
- u8 idx;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
-
- switch (vif->type) {
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- type = CONNECTION_INFRA_AP;
- break;
- case NL80211_IFTYPE_STATION:
- type = CONNECTION_INFRA_STA;
- break;
- case NL80211_IFTYPE_ADHOC:
- type = CONNECTION_IBSS_ADHOC;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- omac = (struct bss_info_omac *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0
- : mvif->mt76.omac_idx;
- omac->conn_type = cpu_to_le32(type);
- omac->omac_idx = mvif->mt76.omac_idx;
- omac->band_idx = mvif->mt76.band_idx;
- omac->hw_bss_idx = idx;
-}
-
struct mt7915_he_obss_narrow_bw_ru_data {
bool tolerated;
};
struct tlv *tlv;
int freq1 = chandef->center_freq1;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
ch = (struct bss_info_rf_ch *)tlv;
ch->pri_ch = chandef->chan->hw_value;
ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
- ch->bw = mt7915_mcu_chan_bw(chandef);
+ ch->bw = mt76_connac_chan_bw(chandef);
if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
int freq2 = chandef->center_freq2;
struct bss_info_ra *ra;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
ra = (struct bss_info_ra *)tlv;
ra->op_mode = vif->type == NL80211_IFTYPE_AP;
struct bss_info_he *he;
struct tlv *tlv;
- cap = mt7915_get_he_phy_cap(phy, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
struct bss_info_hw_amsdu *amsdu;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct bss_info_hw_amsdu *)tlv;
amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
amsdu->enable = true;
}
-static void
-mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
-{
-/* SIFS 20us + 512 byte beacon tranmitted by 1Mbps (3906us) */
-#define BCN_TX_ESTIMATE_TIME (4096 + 20)
- struct bss_info_ext_bss *ext;
- int ext_bss_idx, tsf_offset;
- struct tlv *tlv;
-
- ext_bss_idx = mvif->mt76.omac_idx - EXT_BSSID_START;
- if (ext_bss_idx < 0)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_EXT_BSS, sizeof(*ext));
-
- ext = (struct bss_info_ext_bss *)tlv;
- tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
- ext->mbss_tsf_offset = cpu_to_le32(tsf_offset);
-}
-
static void
mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
{
enum nl80211_band band = chandef->chan->band;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
bmc = (struct bss_info_bmc_rate *)tlv;
if (band == NL80211_BAND_2GHZ) {
struct ieee80211_vif *vif, int enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
struct sk_buff *skb;
if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
mt7915_mcu_muar_config(phy, vif, true, enable);
}
- skb = mt7915_mcu_alloc_sta_req(phy->dev, mvif, NULL,
- MT7915_BSS_UPDATE_MAX_SIZE);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* bss_omac must be first */
if (enable)
- mt7915_mcu_bss_omac_tlv(skb, vif);
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
- mt7915_mcu_bss_basic_tlv(skb, vif, phy, enable);
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
if (vif->type == NL80211_IFTYPE_MONITOR)
goto out;
if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
- mt7915_mcu_bss_ext_tlv(skb, mvif);
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
}
out:
- return mt76_mcu_skb_send_msg(&phy->dev->mt76, skb,
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
/** starec & wtbl **/
-static int
-mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7915_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7915_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_sec);
- int ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7915_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-static void
-mt7915_mcu_sta_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
-{
- struct sta_rec_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BA, sizeof(*ba));
-
- ba = (struct sta_rec_ba *)tlv;
- ba->ba_type = tx ? MT_BA_TYPE_ORIGINATOR : MT_BA_TYPE_RECIPIENT;
- ba->winsize = cpu_to_le16(params->buf_size);
- ba->ssn = cpu_to_le16(params->ssn);
- ba->ba_en = enable << params->tid;
- ba->amsdu = params->amsdu;
- ba->tid = params->tid;
-}
-
-static void
-mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct wtbl_ba *ba;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_BA, sizeof(*ba),
- wtbl_tlv, sta_wtbl);
-
- ba = (struct wtbl_ba *)tlv;
- ba->tid = params->tid;
-
- if (tx) {
- ba->ba_type = MT_BA_TYPE_ORIGINATOR;
- ba->sn = enable ? cpu_to_le16(params->ssn) : 0;
- ba->ba_en = enable;
- } else {
- memcpy(ba->peer_addr, params->sta->addr, ETH_ALEN);
- ba->ba_type = MT_BA_TYPE_RECIPIENT;
- ba->rst_ba_tid = params->tid;
- ba->rst_ba_sel = RST_BA_MAC_TID_MATCH;
- ba->rst_ba_sb = 1;
- }
-
- if (enable)
- ba->ba_winsize = cpu_to_le16(params->buf_size);
-}
-
-static int
-mt7915_mcu_sta_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable, bool tx)
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
{
struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
struct mt7915_vif *mvif = msta->vif;
- struct wtbl_req_hdr *wtbl_hdr;
- struct tlv *sta_wtbl;
- struct sk_buff *skb;
- int ret;
- if (enable && tx && !params->amsdu)
+ if (enable && !params->amsdu)
msta->wcid.amsdu = false;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
-
- ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (ret)
- return ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
- struct ieee80211_ampdu_params *params,
- bool enable)
-{
- return mt7915_mcu_sta_ba(dev, params, enable, true);
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
}
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
{
- return mt7915_mcu_sta_ba(dev, params, enable, false);
-}
-
-static void
-mt7915_mcu_wtbl_generic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_generic *generic;
- struct wtbl_rx *rx;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_GENERIC, sizeof(*generic),
- wtbl_tlv, sta_wtbl);
-
- generic = (struct wtbl_generic *)tlv;
-
- if (sta) {
- memcpy(generic->peer_addr, sta->addr, ETH_ALEN);
- generic->partial_aid = cpu_to_le16(sta->aid);
- generic->muar_idx = mvif->mt76.omac_idx;
- generic->qos = sta->wme;
- } else {
- /* use BSSID in station mode */
- if (vif->type == NL80211_IFTYPE_STATION)
- memcpy(generic->peer_addr, vif->bss_conf.bssid,
- ETH_ALEN);
- else
- eth_broadcast_addr(generic->peer_addr);
-
- generic->muar_idx = 0xe;
- }
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_RX, sizeof(*rx),
- wtbl_tlv, sta_wtbl);
-
- rx = (struct wtbl_rx *)tlv;
- rx->rca1 = sta ? vif->type != NL80211_IFTYPE_AP : 1;
- rx->rca2 = 1;
- rx->rv = 1;
-}
-
-static void
-mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
-#define EXTRA_INFO_VER BIT(0)
-#define EXTRA_INFO_NEW BIT(1)
- struct sta_rec_basic *basic;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BASIC, sizeof(*basic));
-
- basic = (struct sta_rec_basic *)tlv;
- basic->extra_info = cpu_to_le16(EXTRA_INFO_VER);
-
- if (enable) {
- basic->extra_info |= cpu_to_le16(EXTRA_INFO_NEW);
- basic->conn_state = CONN_STATE_PORT_SECURE;
- } else {
- basic->conn_state = CONN_STATE_DISCONNECT;
- }
-
- if (!sta) {
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
- return;
- }
-
- switch (vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_AP:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_STA);
- break;
- case NL80211_IFTYPE_STATION:
- basic->conn_type = cpu_to_le32(CONNECTION_INFRA_AP);
- break;
- case NL80211_IFTYPE_ADHOC:
- basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->aid = cpu_to_le16(sta->aid);
- basic->qos = sta->wme;
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, false);
}
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
- enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
- const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
if (!sta->he_cap.has_he)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
he = (struct sta_rec_he *)tlv;
he->he_cap = cpu_to_le32(cap);
+ mcs_map = sta->he_cap.he_mcs_nss_supp;
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (elem->phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW8080],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW160],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_160));
fallthrough;
default:
mt7915_mcu_set_sta_he_mcs(sta,
&he->max_nss_mcs[CMD_HE_MCS_BW80],
- mcs_mask);
+ le16_to_cpu(mcs_map.rx_mcs_80));
break;
}
he->pkt_ext = 2;
}
-static void
-mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif)
-{
- struct sta_rec_uapsd *uapsd;
- struct tlv *tlv;
-
- if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
- return;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
- uapsd = (struct sta_rec_uapsd *)tlv;
-
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
- uapsd->dac_map |= BIT(3);
- uapsd->tac_map |= BIT(3);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
- uapsd->dac_map |= BIT(2);
- uapsd->tac_map |= BIT(2);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
- uapsd->dac_map |= BIT(1);
- uapsd->tac_map |= BIT(1);
- }
- if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
- uapsd->dac_map |= BIT(0);
- uapsd->tac_map |= BIT(0);
- }
- uapsd->max_sp = sta->max_sp;
-}
-
static void
mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
if (!sta->vht_cap.vht_supported)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
struct sta_rec_ht *ht;
struct tlv *tlv;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
if (!sta->vht_cap.vht_supported)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
}
static void
-mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
if (!sta->max_amsdu_len)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- amsdu->max_mpdu_size = sta->max_amsdu_len >=
- IEEE80211_MAX_MPDU_LEN_VHT_7991;
msta->wcid.amsdu = true;
-}
-
-static void
-mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct wtbl_smps *smps;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
- wtbl_tlv, sta_wtbl);
- smps = (struct wtbl_smps *)tlv;
- smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
-}
-
-static void
-mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, void *sta_wtbl,
- void *wtbl_tlv)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_ht *ht = NULL;
- struct tlv *tlv;
- /* wtbl ht */
- if (sta->ht_cap.ht_supported) {
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
- wtbl_tlv, sta_wtbl);
- ht = (struct wtbl_ht *)tlv;
- ht->ldpc = mvif->cap.ldpc &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
- ht->af = sta->ht_cap.ampdu_factor;
- ht->mm = sta->ht_cap.ampdu_density;
- ht->ht = true;
- }
-
- /* wtbl vht */
- if (sta->vht_cap.vht_supported) {
- struct wtbl_vht *vht;
- u8 af;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
- wtbl_tlv, sta_wtbl);
- vht = (struct wtbl_vht *)tlv;
- vht->ldpc = mvif->cap.ldpc &&
- (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
- vht->vht = true;
-
- af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->vht_cap.cap);
- if (ht)
- ht->af = max_t(u8, ht->af, af);
- }
-
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_tlv);
-}
-
-static void
-mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
-{
- struct mt7915_sta *msta;
- struct wtbl_hdr_trans *htr = NULL;
- struct tlv *tlv;
-
- tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HDR_TRANS, sizeof(*htr),
- wtbl_tlv, sta_wtbl);
- htr = (struct wtbl_hdr_trans *)tlv;
- htr->no_rx_trans = true;
- if (vif->type == NL80211_IFTYPE_STATION)
- htr->to_ds = true;
- else
- htr->from_ds = true;
-
- if (!sta)
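+ /* clamp the advertised max MPDU size; only non-mt7915 chips handle 11454 bytes */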
+ switch (sta->max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ if (!is_mt7915(&dev->mt76)) {
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ }
+ fallthrough;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
return;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- htr->no_rx_trans = !test_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- if (test_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags)) {
- htr->to_ds = true;
- htr->from_ds = true;
}
}
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta;
struct wtbl_req_hdr *wtbl_hdr;
+ struct mt76_wcid *wcid;
struct tlv *tlv;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ wcid = sta ? &msta->wcid : NULL;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- tlv, &skb);
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, tlv,
+ &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr);
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr);
-
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
+ wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
if (sta)
- mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr);
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+ wtbl_hdr, mvif->cap.ldpc);
return 0;
}
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
- struct sk_buff *skb;
-
- skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- MT76_CONNAC_WTBL_UPDATE_MAX_SIZE);
- if (!skb)
- return -ENOMEM;
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, NULL, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
-
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, NULL, wtbl_hdr);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_EXT_CMD(WTBL_UPDATE),
- true);
-}
-
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
{
struct ieee80211_sta_he_cap *pc = &sta->he_cap;
struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
- const struct ieee80211_sta_he_cap *vc = mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
if (!ebf && !dev->ibf)
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
/* he: eBF only, in accordance with spec
if (!mt7915_is_ebf_supported(phy, vif, sta, true))
return;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
if (sta->he_cap.has_he) {
struct sta_rec_ra_fixed *ra;
struct sk_buff *skb;
struct tlv *tlv;
- int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
ra = (struct sta_rec_ra_fixed *)tlv;
switch (field) {
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
-
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
- &skb);
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
if (IS_ERR(wtbl_hdr))
return PTR_ERR(wtbl_hdr);
- mt7915_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
phy.sgi |= gi << (i << (_he)); \
phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
} \
- for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
- nrates += hweight16(mask->control[band]._mcs[i]); \
- phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
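+ /* only non-zero per-stream masks contribute to the rate count */ \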
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
+ if (!mask->control[band]._mcs[i]) \
+ continue; \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ } \
} while (0)
if (sta->he_cap.has_he) {
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
enum nl80211_band band = chandef->chan->band;
struct sta_rec_ra *ra;
u32 supp_rate = sta->supp_rates[band];
u32 cap = sta->wme ? STA_CAP_WMM : 0;
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
ra = (struct sta_rec_ra *)tlv;
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt7915_get_phy_mode(vif, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
ra->bw = sta->bandwidth;
ra->phy.bw = sta->bandwidth;
struct sk_buff *skb;
int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT76_CONNAC_STA_UPDATE_MAX_SIZE);
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec basic */
- mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
+ mt76_connac_mcu_sta_basic_tlv(skb, vif, sta, enable, true);
if (!enable)
goto out;
/* starec vht */
mt7915_mcu_sta_vht_tlv(skb, sta);
/* starec uapsd */
- mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
}
ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
if (sta && sta->ht_cap.ht_supported) {
/* starec amsdu */
- mt7915_mcu_sta_amsdu_tlv(skb, vif, sta);
+ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
/* starec he */
mt7915_mcu_sta_he_tlv(skb, sta, vif);
/* starec muru */
}
ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
+ if (ret) {
+ dev_kfree_skb(skb);
return ret;
+ }
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
mgmt->u.beacon.variable, len);
if (ie && ie[1] >= sizeof(*he) + 1) {
const struct ieee80211_sta_he_cap *pc =
- mt7915_get_he_phy_cap(phy, vif);
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
he = (void *)(ie + 3);
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(rskb))
return PTR_ERR(rskb);
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = en;
MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}
-static int mt7915_mcu_start_firmware(struct mt7915_dev *dev, u32 addr,
- u32 option)
-{
- struct {
- __le32 option;
- __le32 addr;
- } req = {
- .option = cpu_to_le32(option),
- .addr = cpu_to_le32(addr),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(FW_START_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-
-static int mt7915_mcu_patch_sem_ctrl(struct mt7915_dev *dev, bool get)
+static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
{
- struct {
- __le32 op;
- } req = {
- .op = cpu_to_le32(get ? PATCH_SEM_GET : PATCH_SEM_RELEASE),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_SEM_CONTROL), &req,
- sizeof(req), true);
-}
-
-static int mt7915_mcu_start_patch(struct mt7915_dev *dev)
-{
- struct {
- u8 check_crc;
- u8 reserved[3];
- } req = {
- .check_crc = 0,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD(PATCH_FINISH_REQ), &req,
- sizeof(req), true);
-}
-
-static int mt7915_driver_own(struct mt7915_dev *dev)
-{
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND0,
- MT_TOP_LPCR_HOST_FW_OWN, 0, 500)) {
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
dev_err(dev->mt76.dev, "Timeout for driver own\n");
return -EIO;
}
- return 0;
-}
-
-static int mt7915_mcu_init_download(struct mt7915_dev *dev, u32 addr,
- u32 len, u32 mode)
-{
- struct {
- __le32 addr;
- __le32 len;
- __le32 mode;
- } req = {
- .addr = cpu_to_le32(addr),
- .len = cpu_to_le32(len),
- .mode = cpu_to_le32(mode),
- };
- int attr;
-
- if (req.addr == cpu_to_le32(MCU_PATCH_ADDRESS))
- attr = MCU_CMD(PATCH_START_REQ);
- else
- attr = MCU_CMD(TARGET_ADDRESS_LEN_REQ);
+ /* clear the pending irq once driver own succeeds */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
- return mt76_mcu_send_msg(&dev->mt76, attr, &req, sizeof(req), true);
+ return 0;
}
static int mt7915_load_patch(struct mt7915_dev *dev)
{
const struct mt7915_patch_hdr *hdr;
const struct firmware *fw = NULL;
+ const char *patch;
int i, ret, sem;
- sem = mt7915_mcu_patch_sem_ctrl(dev, 1);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 1);
switch (sem) {
case PATCH_IS_DL:
return 0;
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7915_ROM_PATCH, dev->mt76.dev);
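+ /* select the ROM patch image matching the detected chip */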
+ patch = is_mt7915(&dev->mt76) ? MT7915_ROM_PATCH : MT7916_ROM_PATCH;
+ ret = request_firmware(&fw, patch, dev->mt76.dev);
if (ret)
goto out;
len = be32_to_cpu(sec->info.len);
dl = fw->data + be32_to_cpu(sec->offs);
- ret = mt7915_mcu_init_download(dev, addr, len,
- DL_MODE_NEED_RSP);
+ ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ DL_MODE_NEED_RSP);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
}
}
- ret = mt7915_mcu_start_patch(dev);
+ ret = mt76_connac_mcu_start_patch(&dev->mt76);
if (ret)
dev_err(dev->mt76.dev, "Failed to start patch\n");
out:
- sem = mt7915_mcu_patch_sem_ctrl(dev, 0);
+ sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, 0);
switch (sem) {
case PATCH_REL_SEM_SUCCESS:
break;
return ret;
}
-static u32 mt7915_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
const struct mt7915_fw_trailer *hdr,
region = (const struct mt7915_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7915_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
if (region->feature_set & FW_FEATURE_OVERRIDE_ADDR)
override = addr;
- err = mt7915_mcu_init_download(dev, addr, len, mode);
+ err = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
+ mode);
if (err) {
dev_err(dev->mt76.dev, "Download request failed\n");
return err;
if (is_wa)
option |= FW_START_WORKING_PDA_CR4;
- return mt7915_mcu_start_firmware(dev, override, option);
+ return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
}
static int mt7915_load_ram(struct mt7915_dev *dev)
{
const struct mt7915_fw_trailer *hdr;
const struct firmware *fw;
+ const char *mcu;
int ret;
- ret = request_firmware(&fw, MT7915_FIRMWARE_WM, dev->mt76.dev);
+ mcu = is_mt7915(&dev->mt76) ? MT7915_FIRMWARE_WM : MT7916_FIRMWARE_WM;
+ ret = request_firmware(&fw, mcu, dev->mt76.dev);
if (ret)
return ret;
release_firmware(fw);
- ret = request_firmware(&fw, MT7915_FIRMWARE_WA, dev->mt76.dev);
+ mcu = is_mt7915(&dev->mt76) ? MT7915_FIRMWARE_WA : MT7916_FIRMWARE_WA;
+ ret = request_firmware(&fw, mcu, dev->mt76.dev);
if (ret)
return ret;
return ret;
}
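+/* poll MT_TOP_MISC until the firmware reports the expected stage */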
+static int
+mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
+{
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+ return 0;
+}
+
static int mt7915_load_firmware(struct mt7915_dev *dev)
{
int ret;
+ /* make sure the firmware is in download state */
+ if (mt7915_firmware_state(dev, false)) {
+ /* restart firmware once */
+ __mt76_mcu_restart(&dev->mt76);
+ ret = mt7915_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
+ }
+
ret = mt7915_load_patch(dev);
if (ret)
return ret;
if (ret)
return ret;
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_RDY), 1000)) {
- dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
- return -EIO;
- }
+ ret = mt7915_firmware_state(dev, true);
+ if (ret)
+ return ret;
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
.headroom = sizeof(struct mt7915_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt7915_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
dev->mt76.mcu_ops = &mt7915_mcu_ops;
- ret = mt7915_driver_own(dev);
+ /* force the firmware operation mode into normal state,
+ * which must be set before the firmware download stage.
+ */
+ if (is_mt7915(&dev->mt76))
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+ else
+ mt76_wr(dev, MT_SWDEF_MODE_MT7916, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_driver_own(dev, 0);
if (ret)
return ret;
+ /* set driver own for band1 when two hifs exist */
+ if (dev->hif2) {
+ ret = mt7915_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
ret = mt7915_load_firmware(dev);
if (ret)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
- FIELD_PREP(MT_TOP_MISC_FW_STATE,
- FW_STATE_FW_DOWNLOAD), 1000)) {
+ if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
return;
}
- mt76_wr(dev, MT_TOP_LPCR_HOST_BAND0, MT_TOP_LPCR_HOST_FW_OWN);
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
sizeof(req), false);
}
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val)
-{
- struct mt7915_dev *dev = phy->dev;
- struct {
- u8 prot_idx;
- u8 band;
- u8 rsv[2];
- __le32 len_thresh;
- __le32 pkt_thresh;
- } __packed req = {
- .prot_idx = 1,
- .band = phy != &dev->phy,
- .len_thresh = cpu_to_le32(val),
- .pkt_thresh = cpu_to_le32(0x2),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PROTECT_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
return mt7915_mcu_update_edca(dev, &req);
}
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter)
-{
-#define ENTER_PM_STATE 1
-#define EXIT_PM_STATE 2
- struct {
- u8 pm_number;
- u8 pm_state;
- u8 bssid[ETH_ALEN];
- u8 dtim_period;
- u8 wlan_idx_lo;
- __le16 bcn_interval;
- __le32 aid;
- __le32 rx_filter;
- u8 band_idx;
- u8 wlan_idx_hi;
- u8 rsv[2];
- __le32 feature;
- u8 omac_idx;
- u8 wmm_idx;
- u8 bcn_loss_cnt;
- u8 bcn_sp_duration;
- } __packed req = {
- .pm_number = 5,
- .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
- .band_idx = band,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(PM_STATE_CTRL), &req,
- sizeof(req), true);
-}
-
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev,
- enum mt7915_rdd_cmd cmd, u8 index,
- u8 rx_sel, u8 val)
-{
- struct {
- u8 ctrl;
- u8 rdd_idx;
- u8 rdd_rx_sel;
- u8 val;
- u8 rsv[4];
- } __packed req = {
- .ctrl = cmd,
- .rdd_idx = index,
- .rdd_rx_sel = rx_sel,
- .val = val,
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_CTRL), &req,
- sizeof(req), true);
-}
-
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
{
struct {
sizeof(req), true);
}
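+/* control the background (offchannel) scan chain via the OFFCH_SCAN_CTRL command */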
+static int
+mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7915_mcu_background_chain_ctrl req = {
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
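+ /* hand the firmware both the operating channel and the monitor channel */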
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy != &dev->phy;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7915_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7915_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
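+ /* translate the regulatory DFS region into the firmware's RDD region code */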
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
{
struct mt7915_dev *dev = phy->dev;
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7915_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = ext_phy,
if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ else if (phy->mt76->hw->conf.radar_enabled &&
chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
req.switch_reason = CH_SWITCH_DFS;
else
#define PAGE_IDX_MASK GENMASK(4, 2)
#define PER_PAGE_SIZE 0x400
struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
- u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE);
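+ /* the EEPROM size is chip dependent, so query it at runtime */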
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int eep_len;
int i;
struct sk_buff *skb;
int ret;
- if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE))
- eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
else
eep_len = PER_PAGE_SIZE;
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
/* strict order */
- static const enum mt7915_chan_mib_offs offs[] = {
- MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME
+ static const u32 offs[] = {
+ MIB_BUSY_TIME, MIB_TX_TIME, MIB_RX_TIME, MIB_OBSS_AIRTIME,
+ MIB_BUSY_TIME_V2, MIB_TX_TIME_V2, MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
};
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[4];
struct sk_buff *skb;
- int i, ret;
+ int i, ret, start = 0;
+
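+ /* non-mt7915 chips read the _V2 offsets in the second half of offs[] */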
+ if (!is_mt7915(&dev->mt76))
+ start = 4;
for (i = 0; i < 4; i++) {
req[i].band = cpu_to_le32(phy != &dev->phy);
- req[i].offs = cpu_to_le32(offs[i]);
+ req[i].offs = cpu_to_le32(offs[i + start]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
struct sk_buff *skb;
struct tlv *tlv;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
if (IS_ERR(skb))
return PTR_ERR(skb);
- tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color));
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
bss_color = (struct bss_info_color *)tlv;
bss_color->disable = !he_bss_color->enabled;
bss_color->color = he_bss_color->color;
} hw_pulse[32];
} __packed;
+struct mt7915_mcu_background_chain_ctrl {
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan;/* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
struct mt7915_mcu_eeprom {
u8 buffer_mode;
u8 format;
} __packed;
enum mt7915_chan_mib_offs {
+ /* mt7915 */
MIB_BUSY_TIME = 14,
MIB_TX_TIME = 81,
MIB_RX_TIME,
- MIB_OBSS_AIRTIME = 86
+ MIB_OBSS_AIRTIME = 86,
+ /* mt7916 */
+ MIB_BUSY_TIME_V2 = 0,
+ MIB_TX_TIME_V2 = 6,
+ MIB_RX_TIME_V2 = 8,
+ MIB_OBSS_AIRTIME_V2 = 490
};
struct edca {
MCU_MMPS_DISABLE,
};
-#define STA_TYPE_STA BIT(0)
-#define STA_TYPE_AP BIT(1)
-#define STA_TYPE_ADHOC BIT(2)
-#define STA_TYPE_WDS BIT(4)
-#define STA_TYPE_BC BIT(5)
-
-#define NETWORK_INFRA BIT(16)
-#define NETWORK_P2P BIT(17)
-#define NETWORK_IBSS BIT(18)
-#define NETWORK_WDS BIT(21)
-
-#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
-#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
-#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
-#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
-#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
-#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
-#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
-
-#define CONN_STATE_DISCONNECT 0
-#define CONN_STATE_CONNECT 1
-#define CONN_STATE_PORT_SECURE 2
-
enum {
SCS_SEND_DATA,
SCS_SET_MANUAL_PD_TH,
// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
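+/* chip-specific CSR addresses and register offsets, indexed by shared register ids */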
+static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+};
+
+static const u32 mt7916_reg[] = {
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+};
+
+static const u32 mt7915_offs[] = {
+ [TMAC_CDTR] = 0x090,
+ [TMAC_ODTR] = 0x094,
+ [TMAC_ATCR] = 0x098,
+ [TMAC_TRCR0] = 0x09c,
+ [TMAC_ICR0] = 0x0a4,
+ [TMAC_ICR1] = 0x0b4,
+ [TMAC_CTCR0] = 0x0f4,
+ [TMAC_TFCR0] = 0x1e0,
+ [MDP_BNRCFR0] = 0x070,
+ [MDP_BNRCFR1] = 0x074,
+ [ARB_DRNGR0] = 0x194,
+ [ARB_SCR] = 0x080,
+ [RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AWSCR0] = 0x05c,
+ [AGG_PCR0] = 0x06c,
+ [AGG_ACR0] = 0x084,
+ [AGG_MRCR] = 0x098,
+ [AGG_ATCR1] = 0x0f0,
+ [AGG_ATCR3] = 0x0f4,
+ [LPON_UTTR0] = 0x080,
+ [LPON_UTTR1] = 0x084,
+ [LPON_FRCR] = 0x314,
+ [MIB_SDR3] = 0x014,
+ [MIB_SDR4] = 0x018,
+ [MIB_SDR5] = 0x01c,
+ [MIB_SDR7] = 0x024,
+ [MIB_SDR8] = 0x028,
+ [MIB_SDR9] = 0x02c,
+ [MIB_SDR10] = 0x030,
+ [MIB_SDR11] = 0x034,
+ [MIB_SDR12] = 0x038,
+ [MIB_SDR13] = 0x03c,
+ [MIB_SDR14] = 0x040,
+ [MIB_SDR15] = 0x044,
+ [MIB_SDR16] = 0x048,
+ [MIB_SDR17] = 0x04c,
+ [MIB_SDR18] = 0x050,
+ [MIB_SDR19] = 0x054,
+ [MIB_SDR20] = 0x058,
+ [MIB_SDR21] = 0x05c,
+ [MIB_SDR22] = 0x060,
+ [MIB_SDR23] = 0x064,
+ [MIB_SDR24] = 0x068,
+ [MIB_SDR25] = 0x06c,
+ [MIB_SDR27] = 0x074,
+ [MIB_SDR28] = 0x078,
+ [MIB_SDR29] = 0x07c,
+ [MIB_SDRVEC] = 0x080,
+ [MIB_SDR31] = 0x084,
+ [MIB_SDR32] = 0x088,
+ [MIB_SDRMUBF] = 0x090,
+ [MIB_DR8] = 0x0c0,
+ [MIB_DR9] = 0x0c4,
+ [MIB_DR11] = 0x0cc,
+ [MIB_MB_SDR0] = 0x100,
+ [MIB_MB_SDR1] = 0x104,
+ [TX_AGG_CNT] = 0x0a8,
+ [TX_AGG_CNT2] = 0x164,
+ [MIB_ARNG] = 0x4b8,
+ [WTBLON_TOP_WDUCR] = 0x0,
+ [WTBL_UPDATE] = 0x030,
+ [PLE_FL_Q_EMPTY] = 0x0b0,
+ [PLE_FL_Q_CTRL] = 0x1b0,
+ [PLE_AC_QEMPTY] = 0x500,
+ [PLE_FREEPG_CNT] = 0x100,
+ [PLE_FREEPG_HEAD_TAIL] = 0x104,
+ [PLE_PG_HIF_GROUP] = 0x110,
+ [PLE_HIF_PG_INFO] = 0x114,
+ [AC_OFFSET] = 0x040,
+};
+
+static const u32 mt7916_offs[] = {
+ [TMAC_CDTR] = 0x0c8,
+ [TMAC_ODTR] = 0x0cc,
+ [TMAC_ATCR] = 0x00c,
+ [TMAC_TRCR0] = 0x010,
+ [TMAC_ICR0] = 0x014,
+ [TMAC_ICR1] = 0x018,
+ [TMAC_CTCR0] = 0x114,
+ [TMAC_TFCR0] = 0x0e4,
+ [MDP_BNRCFR0] = 0x090,
+ [MDP_BNRCFR1] = 0x094,
+ [ARB_DRNGR0] = 0x1e0,
+ [ARB_SCR] = 0x000,
+ [RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AWSCR0] = 0x030,
+ [AGG_PCR0] = 0x040,
+ [AGG_ACR0] = 0x054,
+ [AGG_MRCR] = 0x068,
+ [AGG_ATCR1] = 0x1a8,
+ [AGG_ATCR3] = 0x080,
+ [LPON_UTTR0] = 0x360,
+ [LPON_UTTR1] = 0x364,
+ [LPON_FRCR] = 0x37c,
+ [MIB_SDR3] = 0x698,
+ [MIB_SDR4] = 0x788,
+ [MIB_SDR5] = 0x780,
+ [MIB_SDR7] = 0x5a8,
+ [MIB_SDR8] = 0x78c,
+ [MIB_SDR9] = 0x024,
+ [MIB_SDR10] = 0x76c,
+ [MIB_SDR11] = 0x790,
+ [MIB_SDR12] = 0x558,
+ [MIB_SDR13] = 0x560,
+ [MIB_SDR14] = 0x564,
+ [MIB_SDR15] = 0x568,
+ [MIB_SDR16] = 0x7fc,
+ [MIB_SDR17] = 0x800,
+ [MIB_SDR18] = 0x030,
+ [MIB_SDR19] = 0x5ac,
+ [MIB_SDR20] = 0x5b0,
+ [MIB_SDR21] = 0x5b4,
+ [MIB_SDR22] = 0x770,
+ [MIB_SDR23] = 0x774,
+ [MIB_SDR24] = 0x778,
+ [MIB_SDR25] = 0x77c,
+ [MIB_SDR27] = 0x080,
+ [MIB_SDR28] = 0x084,
+ [MIB_SDR29] = 0x650,
+ [MIB_SDRVEC] = 0x5a8,
+ [MIB_SDR31] = 0x55c,
+ [MIB_SDR32] = 0x7a8,
+ [MIB_SDRMUBF] = 0x7ac,
+ [MIB_DR8] = 0x56c,
+ [MIB_DR9] = 0x570,
+ [MIB_DR11] = 0x574,
+ [MIB_MB_SDR0] = 0x688,
+ [MIB_MB_SDR1] = 0x690,
+ [TX_AGG_CNT] = 0x7dc,
+ [TX_AGG_CNT2] = 0x7ec,
+ [MIB_ARNG] = 0x0b0,
+ [WTBLON_TOP_WDUCR] = 0x200,
+ [WTBL_UPDATE] = 0x230,
+ [PLE_FL_Q_EMPTY] = 0x360,
+ [PLE_FL_Q_CTRL] = 0x3e0,
+ [PLE_AC_QEMPTY] = 0x600,
+ [PLE_FREEPG_CNT] = 0x380,
+ [PLE_FREEPG_HEAD_TAIL] = 0x384,
+ [PLE_PG_HIF_GROUP] = 0x00c,
+ [PLE_HIF_PG_INFO] = 0x388,
+ [AC_OFFSET] = 0x080,
+};
+
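+/* fixed physical-to-mapped address windows used for register remapping */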
+static const struct __map mt7915_reg_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x0, 0x0, 0x0 }, /* sentinel: end of search */
+};
+
+static const struct __map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x1000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x1000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x2000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000}, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x1000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0xae000, 0x1000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000}, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000}, /* WF_TOP_MISC_ON */
+ { 0x0, 0x0, 0x0 }, /* sentinel: end of search */
+};
static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
{
u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_remap = is_mt7915(&dev->mt76) ?
+ MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
- mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ dev->bus_ops->rmw(&dev->mt76, l1_remap,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
/* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L1);
+ dev->bus_ops->rr(&dev->mt76, l1_remap);
return MT_HIF_REMAP_BASE_L1 + offset;
}
static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
{
- u32 offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
- u32 base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+ u32 offset, base;
- mt76_rmw_field(dev, MT_HIF_REMAP_L2, MT_HIF_REMAP_L2_MASK, base);
- /* use read to push write */
- mt76_rr(dev, MT_HIF_REMAP_L2);
+ if (is_mt7915(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ } else {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916,
+ MT_HIF_REMAP_L2_MASK_MT7916,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916);
- return MT_HIF_REMAP_BASE_L2 + offset;
+ offset += MT_HIF_REMAP_BASE_L2_MT7916;
+ }
+
+ return offset;
}
static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
int i;
if (addr < 0x100000)
return addr;
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ if (!dev->reg.map) {
+ dev_err(dev->mt76.dev, "err: reg_map is null\n");
+ return addr;
+ }
+
+ for (i = 0; i < dev->reg.map_size; i++) {
u32 ofs;
- if (addr < fixed_map[i].phys)
+ if (addr < dev->reg.map[i].phys)
continue;
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
continue;
- return fixed_map[i].mapped + ofs;
+ return dev->reg.map[i].maps + ofs;
}
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000))
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END) ||
+ (addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END))
return mt7915_reg_map_l1(dev, addr);
return mt7915_reg_map_l2(dev, addr);
return dev->bus_ops->rmw(mdev, addr, mask, val);
}
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq)
+static int mt7915_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
{
struct mt76_bus_ops *bus_ops;
struct mt7915_dev *dev;
dev = container_of(mdev, struct mt7915_dev, mt76);
mt76_mmio_init(&dev->mt76, mem_base);
+ switch (device_id) {
+ case 0x7915:
+ dev->reg.reg_rev = mt7915_reg;
+ dev->reg.offs_rev = mt7915_offs;
+ dev->reg.map = mt7915_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
+ break;
+ case 0x7906:
+ dev->reg.reg_rev = mt7916_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7916_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
bus_ops->rmw = mt7915_rmw;
dev->mt76.bus = bus_ops;
- mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ mdev->rev = (device_id << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ return 0;
+}
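+
Composing mdev->rev from the PCI device id instead of reading MT_HW_CHIPID means mt76_chip() (the top half of rev) and therefore is_mt7915() now key off the id the bus already reported. A worked example, assuming a 0x7906 device whose MT_HW_REV register reads 0x11:

	/* Illustration only: revision word composition after this change. */
	static u32 rev_word(u32 device_id, u32 hw_rev)
	{
		return (device_id << 16) | (hw_rev & 0xff);
	}

	/* rev_word(0x7906, 0x11) == 0x79060011, so mt76_chip() yields 0x7906
	 * and is_mt7915() correctly returns false.
	 */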
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
+ bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
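+
This helper keeps a single shadow irqmask for both PCIe interfaces and mirrors it into MT_INT_MASK_CSR and MT_INT1_MASK_CSR when write_reg is set. The mt7915_irq_enable()/mt7915_irq_disable() wrappers are not part of this hunk; a plausible sketch of the enable side, assuming it defers the register write (addr 0) when only one interface exists:

	/* Sketch under the stated assumption, not the driver's definition. */
	static void mt7915_irq_enable_sketch(struct mt7915_dev *dev, u32 mask)
	{
		if (dev->hif2)
			mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
		else
			mt76_set_irq_mask(&dev->mt76, 0, 0, mask);

		tasklet_schedule(&dev->irq_tasklet);
	}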
+
+static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
+ u32 intr, intr1, mask;
+
mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
- return 0;
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX(MT_RXQ_MAIN))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (!is_mt7915(&dev->mt76) &&
+ (intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
+
+ if (intr & MT_INT_RX(MT_RXQ_EXT_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & MT_MCU_CMD_ERROR_MASK) {
+ dev->reset_state = val;
+ ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
+ wake_up(&dev->reset_wait);
+ }
+ }
+}
+
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt7915_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_ps = mt7915_sta_ps,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct ieee80211_ops *ops;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ops = devm_kmemdup(pdev, &mt7915_ops, sizeof(mt7915_ops), GFP_KERNEL);
+ if (!ops)
+ return ERR_PTR(-ENOMEM);
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ ret = mt7915_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
}
+
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
+}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
+MODULE_LICENSE("Dual BSD/GPL");
#include <linux/interrupt.h>
#include <linux/ktime.h>
-#include "../mt76.h"
+#include "../mt76_connac.h"
#include "regs.h"
#define MT7915_MAX_INTERFACES 19
#define MT7915_MAX_WMM_SETS 4
#define MT7915_WTBL_SIZE 288
-#define MT7915_WTBL_RESERVED (MT7915_WTBL_SIZE - 1)
+#define MT7916_WTBL_SIZE 544
+#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
MT7915_MAX_INTERFACES)
#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
+#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
+#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
+
#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
#define MT7915_EEPROM_SIZE 3584
+#define MT7916_EEPROM_SIZE 4096
+
#define MT7915_EEPROM_BLOCK_SIZE 16
#define MT7915_TOKEN_SIZE 8192
#define MT7915_MAX_TWT_AGRT 16
#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MAX_QUEUE (__MT_RXQ_MAX + __MT_MCUQ_MAX + 2)
struct mt7915_vif;
struct mt7915_sta;
MT7915_RXQ_MCU_WA_EXT,
};
-struct mt7915_sta_key_conf {
- s8 keyidx;
- u8 key[16];
+enum mt7916_rxq_id {
+ MT7916_RXQ_MCU_WM = 0,
+ MT7916_RXQ_MCU_WA,
+ MT7916_RXQ_MCU_WA_MAIN,
+ MT7916_RXQ_MCU_WA_EXT,
+ MT7916_RXQ_BAND0,
+ MT7916_RXQ_BAND1,
};
struct mt7915_twt_flow {
struct mt76_sta_stats stats;
- struct mt7915_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
struct {
u8 flowid_mask;
u8 slottime;
u8 rdd_state;
- int dfs_state;
u32 rx_ampdu_ts;
u32 ampdu_ref;
};
struct mt7915_hif *hif2;
+ struct mt7915_reg_desc reg;
+ u8 q_id[MT7915_MAX_QUEUE];
+ u32 q_int_mask[MT7915_MAX_QUEUE];
+ u32 wfdma_mask;
const struct mt76_bus_ops *bus_ops;
struct tasklet_struct irq_tasklet;
struct mt7915_phy phy;
+ /* channel configured for the monitor rx chain */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7915_phy *rdd2_phy;
+
u16 chainmask;
u32 hif_idx;
bool ibf;
u8 fw_debug_wm;
u8 fw_debug_wa;
+ u8 fw_debug_bin;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
void *cal;
} twt;
};
+enum {
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
+};
+
enum {
MT_CTX0,
MT_HIF0 = 0x0,
enum {
MT_RX_SEL0,
MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
};
enum mt7915_rdd_cmd {
return phy->priv;
}
-static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
-{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
-}
-
extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
+extern struct pci_driver mt7915_pci_driver;
+extern struct pci_driver mt7915_hif_driver;
-u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
-void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy);
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy);
int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
-int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
-int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct mt7915_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 en);
int mt7915_mcu_set_scs(struct mt7915_dev *dev, u8 band, bool enable);
int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
-int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val);
-int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter);
int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
-int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
- u8 index, u8 rx_sel, u8 val);
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef);
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
-static inline bool is_mt7915(struct mt76_dev *dev)
+static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
+}
+
+static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
{
- return mt76_chip(dev) == 0x7915;
+ return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
}
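
These inline helpers keep call sites generation-agnostic. A hypothetical call site (illustration only, assuming a probe path where dev is in scope), sizing a cache off the per-chip EEPROM size:

	/* Hypothetical, not from the driver. */
	u8 *eep = devm_kzalloc(dev->mt76.dev, mt7915_eeprom_size(dev),
			       GFP_KERNEL);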
void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
void mt7915_mac_update_stats(struct mt7915_phy *phy);
-int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
struct mt7915_sta *msta,
u8 flowid);
struct mt76_tx_info *tx_info);
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_tx_token_put(struct mt7915_dev *dev);
-int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc);
+int mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy, void *ms);
int mt7915_init_debugfs(struct mt7915_phy *phy);
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
static const struct pci_device_id mt7915_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
{ },
};
static const struct pci_device_id mt7915_hif_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
{ },
};
-void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
- u32 clear, u32 set)
-{
- struct mt76_dev *mdev = &dev->mt76;
- unsigned long flags;
-
- spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
-
- mdev->mmio.irqmask &= ~clear;
- mdev->mmio.irqmask |= set;
-
- if (write_reg) {
- mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
- mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
- }
-
- spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
-}
-
-static struct mt7915_hif *
-mt7915_pci_get_hif2(struct mt7915_dev *dev)
+static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
{
struct mt7915_hif *hif;
u32 val;
list_for_each_entry(hif, &hif_list, list) {
val = readl(hif->regs + MT_PCIE_RECOG_ID);
val &= MT_PCIE_RECOG_ID_MASK;
- if (val != dev->hif_idx)
+ if (val != idx)
continue;
get_device(hif->dev);
put_device(hif->dev);
}
-static void
-mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
-{
- struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
- static const u32 rx_irq_mask[] = {
- [MT_RXQ_MAIN] = MT_INT_RX_DONE_DATA0,
- [MT_RXQ_EXT] = MT_INT_RX_DONE_DATA1,
- [MT_RXQ_MCU] = MT_INT_RX_DONE_WM,
- [MT_RXQ_MCU_WA] = MT_INT_RX_DONE_WA,
- [MT_RXQ_EXT_WA] = MT_INT_RX_DONE_WA_EXT,
- };
-
- mt7915_irq_enable(dev, rx_irq_mask[q]);
-}
-
-/* TODO: support 2/4/6/8 MSI-X vectors */
-static void mt7915_irq_tasklet(struct tasklet_struct *t)
+static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
{
- struct mt7915_dev *dev = from_tasklet(dev, t, irq_tasklet);
- u32 intr, intr1, mask;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
- intr &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
-
- if (dev->hif2) {
- intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
- intr1 &= dev->mt76.mmio.irqmask;
- mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
-
- intr |= intr1;
- }
-
- trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
-
- mask = intr & MT_INT_RX_DONE_ALL;
- if (intr & MT_INT_TX_DONE_MCU)
- mask |= MT_INT_TX_DONE_MCU;
+ hif_idx++;
+ if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL) &&
+ !pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL))
+ return NULL;
- mt7915_irq_disable(dev, mask);
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
- if (intr & MT_INT_TX_DONE_MCU)
- napi_schedule(&dev->mt76.tx_napi);
-
- if (intr & MT_INT_RX_DONE_DATA0)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
-
- if (intr & MT_INT_RX_DONE_DATA1)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT]);
-
- if (intr & MT_INT_RX_DONE_WM)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
-
- if (intr & MT_INT_RX_DONE_WA)
- napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
-
- if (intr & MT_INT_RX_DONE_WA_EXT)
- napi_schedule(&dev->mt76.napi[MT_RXQ_EXT_WA]);
-
- if (intr & MT_INT_MCU_CMD) {
- u32 val = mt76_rr(dev, MT_MCU_CMD);
-
- mt76_wr(dev, MT_MCU_CMD, val);
- if (val & MT_MCU_CMD_ERROR_MASK) {
- dev->reset_state = val;
- ieee80211_queue_work(mt76_hw(dev), &dev->reset_work);
- wake_up(&dev->reset_wait);
- }
- }
-}
-
-static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
-{
- struct mt7915_dev *dev = dev_instance;
-
- mt76_wr(dev, MT_INT_MASK_CSR, 0);
- if (dev->hif2)
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
- return IRQ_NONE;
-
- tasklet_schedule(&dev->irq_tasklet);
-
- return IRQ_HANDLED;
-}
-
-static void mt7915_pci_init_hif2(struct mt7915_dev *dev)
-{
- struct mt7915_hif *hif;
-
- dev->hif_idx = ++hif_idx;
- if (!pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL))
- return;
-
- mt76_wr(dev, MT_PCIE_RECOG_ID, dev->hif_idx | MT_PCIE_RECOG_ID_SEM);
-
- hif = mt7915_pci_get_hif2(dev);
- if (!hif)
- return;
-
- dev->hif2 = hif;
-
- mt76_wr(dev, MT_INT1_MASK_CSR, 0);
-
- if (devm_request_irq(dev->mt76.dev, hif->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME "-hif", dev)) {
- mt7915_put_hif2(hif);
- hif = NULL;
- }
-
- /* master switch of PCIe tnterrupt enable */
- mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ return mt7915_pci_get_hif2(hif_idx);
}
static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
static int mt7915_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static const struct mt76_driver_ops drv_ops = {
- /* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
- .survey_flags = SURVEY_INFO_TIME_TX |
- SURVEY_INFO_TIME_RX |
- SURVEY_INFO_TIME_BSS_RX,
- .token_size = MT7915_TOKEN_SIZE,
- .tx_prepare_skb = mt7915_tx_prepare_skb,
- .tx_complete_skb = mt7915_tx_complete_skb,
- .rx_skb = mt7915_queue_rx_skb,
- .rx_check = mt7915_rx_check,
- .rx_poll_complete = mt7915_rx_poll_complete,
- .sta_ps = mt7915_sta_ps,
- .sta_add = mt7915_mac_sta_add,
- .sta_remove = mt7915_mac_sta_remove,
- .update_survey = mt7915_update_channel,
- };
struct mt7915_dev *dev;
struct mt76_dev *mdev;
+ struct mt7915_hif *hif2;
+ int irq;
int ret;
ret = pcim_enable_device(pdev);
mt76_pci_disable_aspm(pdev);
- if (id->device == 0x7916)
+ if (id->device == 0x7916 || id->device == 0x790a)
return mt7915_pci_hif2_probe(pdev);
- mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7915_ops,
- &drv_ops);
- if (!mdev)
- return -ENOMEM;
+ dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
- dev = container_of(mdev, struct mt7915_dev, mt76);
+ mdev = &dev->mt76;
+ hif2 = mt7915_pci_init_hif2(pdev);
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
- goto free;
+ goto free_device;
- ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq);
+ irq = pdev->irq;
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
- goto error;
-
- tasklet_setup(&dev->irq_tasklet, mt7915_irq_tasklet);
+ goto free_irq_vector;
mt76_wr(dev, MT_INT_MASK_CSR, 0);
/* master switch of PCIe interrupt enable */
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
- ret = devm_request_irq(mdev->dev, pdev->irq, mt7915_irq_handler,
- IRQF_SHARED, KBUILD_MODNAME, dev);
- if (ret)
- goto error;
+ if (hif2) {
+ dev->hif2 = hif2;
- mt7915_pci_init_hif2(dev);
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7915_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2;
+ }
ret = mt7915_register_device(dev);
if (ret)
- goto free_irq;
+ goto free_hif2_irq;
return 0;
-free_irq:
- devm_free_irq(mdev->dev, pdev->irq, dev);
-error:
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_irq_vector:
pci_free_irq_vectors(pdev);
-free:
+free_device:
mt76_free_device(&dev->mt76);
return ret;
mt7915_unregister_device(dev);
}
-static struct pci_driver mt7915_hif_driver = {
+struct pci_driver mt7915_hif_driver = {
.name = KBUILD_MODNAME "_hif",
.id_table = mt7915_hif_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_hif_remove,
};
-static struct pci_driver mt7915_pci_driver = {
+struct pci_driver mt7915_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = mt7915_pci_device_table,
.probe = mt7915_pci_probe,
.remove = mt7915_pci_remove,
};
-static int __init mt7915_init(void)
-{
- int ret;
-
- ret = pci_register_driver(&mt7915_hif_driver);
- if (ret)
- return ret;
-
- ret = pci_register_driver(&mt7915_pci_driver);
- if (ret)
- pci_unregister_driver(&mt7915_hif_driver);
-
- return ret;
-}
-
-static void __exit mt7915_exit(void)
-{
- pci_unregister_driver(&mt7915_pci_driver);
- pci_unregister_driver(&mt7915_hif_driver);
-}
-
-module_init(mt7915_init);
-module_exit(mt7915_exit);
-
MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
MODULE_FIRMWARE(MT7915_ROM_PATCH);
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7916_ROM_PATCH);
#ifndef __MT7915_REGS_H
#define __MT7915_REGS_H
+struct __map {
+ u32 phys;
+ u32 maps;
+ u32 size;
+};
+
+/* used to differentiate between generations */
+struct mt7915_reg_desc {
+ const u32 *reg_rev;
+ const u32 *offs_rev;
+ const struct __map *map;
+ u32 map_size;
+};
+
+enum reg_rev {
+ INT_SOURCE_CSR,
+ INT_MASK_CSR,
+ INT1_SOURCE_CSR,
+ INT1_MASK_CSR,
+ INT_MCU_CMD_SOURCE,
+ INT_MCU_CMD_EVENT,
+ __MT_REG_MAX,
+};
+
+enum offs_rev {
+ TMAC_CDTR,
+ TMAC_ODTR,
+ TMAC_ATCR,
+ TMAC_TRCR0,
+ TMAC_ICR0,
+ TMAC_ICR1,
+ TMAC_CTCR0,
+ TMAC_TFCR0,
+ MDP_BNRCFR0,
+ MDP_BNRCFR1,
+ ARB_DRNGR0,
+ ARB_SCR,
+ RMAC_MIB_AIRTIME14,
+ AGG_AWSCR0,
+ AGG_PCR0,
+ AGG_ACR0,
+ AGG_MRCR,
+ AGG_ATCR1,
+ AGG_ATCR3,
+ LPON_UTTR0,
+ LPON_UTTR1,
+ LPON_FRCR,
+ MIB_SDR3,
+ MIB_SDR4,
+ MIB_SDR5,
+ MIB_SDR7,
+ MIB_SDR8,
+ MIB_SDR9,
+ MIB_SDR10,
+ MIB_SDR11,
+ MIB_SDR12,
+ MIB_SDR13,
+ MIB_SDR14,
+ MIB_SDR15,
+ MIB_SDR16,
+ MIB_SDR17,
+ MIB_SDR18,
+ MIB_SDR19,
+ MIB_SDR20,
+ MIB_SDR21,
+ MIB_SDR22,
+ MIB_SDR23,
+ MIB_SDR24,
+ MIB_SDR25,
+ MIB_SDR27,
+ MIB_SDR28,
+ MIB_SDR29,
+ MIB_SDRVEC,
+ MIB_SDR31,
+ MIB_SDR32,
+ MIB_SDRMUBF,
+ MIB_DR8,
+ MIB_DR9,
+ MIB_DR11,
+ MIB_MB_SDR0,
+ MIB_MB_SDR1,
+ TX_AGG_CNT,
+ TX_AGG_CNT2,
+ MIB_ARNG,
+ WTBLON_TOP_WDUCR,
+ WTBL_UPDATE,
+ PLE_FL_Q_EMPTY,
+ PLE_FL_Q_CTRL,
+ PLE_AC_QEMPTY,
+ PLE_FREEPG_CNT,
+ PLE_FREEPG_HEAD_TAIL,
+ PLE_PG_HIF_GROUP,
+ PLE_HIF_PG_INFO,
+ AC_OFFSET,
+ __MT_OFFS_MAX,
+};
+
+#define __REG(id) (dev->reg.reg_rev[(id)])
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
+
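__REG() and __OFFS() resolve through the per-generation tables hung off dev->reg at probe time; mt7915_reg, mt7916_reg and their offs counterparts in mmio.c are flat u32 arrays indexed by the enums above. A hedged sketch of a few entries (not the actual tables), with values carried over from the hard-coded defines this patch removes:

	/* Sketch only; the real tables live in mmio.c. */
	static const u32 mt7915_reg_sketch[__MT_REG_MAX] = {
		[INT_SOURCE_CSR]	= 0xd7010,	/* was MT_WFDMA_EXT_CSR(0x10) */
		[INT_MASK_CSR]		= 0xd7014,	/* was MT_WFDMA_EXT_CSR(0x14) */
		[INT_MCU_CMD_SOURCE]	= 0xd51f0,	/* was MT_WFDMA1(0x1f0) */
	};
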
/* MCU WFDMA0 */
#define MT_MCU_WFDMA0_BASE 0x2000
#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+
#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
/* MCU WFDMA1 */
#define MT_MCU_WFDMA1_BASE 0x3000
#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
-#define MT_MCU_INT_EVENT MT_MCU_WFDMA1(0x108)
+#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
-#define MT_PLE_BASE 0x8000
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_FL_Q_EMPTY 0x0b0
-#define MT_FL_Q0_CTRL 0x1b0
-#define MT_FL_Q2_CTRL 0x1b8
-#define MT_FL_Q3_CTRL 0x1bc
-
-#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
-#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
-#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
-#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
- ((n) << 2))
+#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
+#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
+#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
+#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
+#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
+#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
+ __OFFS(AC_OFFSET) * \
+ (ac) + ((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
-#define MT_PSE_BASE 0xc000
+#define MT_PSE_BASE 0x820c8000
#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
-#define MT_MDP_BASE 0xf000
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
#define MT_MDP_DCR1 MT_MDP(0x004)
#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
-#define MT_MDP_BNRCFR0(_band) MT_MDP(0x070 + ((_band) << 8))
+#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
+ ((_band) << 8))
#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
-#define MT_MDP_BNRCFR1(_band) MT_MDP(0x074 + ((_band) << 8))
+#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
+ ((_band) << 8))
#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
#define MT_MDP_TO_HIF 0
#define MT_MDP_TO_WM 1
-/* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
-#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
-#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, 0x094)
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
-#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, 0x098)
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
-#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
-#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
-#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
-#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
#define MT_IFS_EIFS_CCK GENMASK(8, 0)
-#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
-#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
-#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+/* WF DMA TOP: band 0(0x820e7000),band 1(0x820f7000) */
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
-/* ETBF: band 0(0x24000), band 1(0xa4000) */
-#define MT_WF_ETBF_BASE(_band) ((_band) ? 0xa4000 : 0x24000)
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
-/* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
-#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
-#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, 0x084)
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
-#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + (n) * 4)
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 1))
+#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 4))
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
#define MT_LPON_TCR_SW_ADJUST BIT(1)
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
-/* MIB: band 0(0x24800), band 1(0xa4800) */
+/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
/* These counters are (mostly?) clear-on-read. So, some should not
* be read at all in case firmware is already reading them. These
* are commented with 'DNR' below. The DNR stats will be read by querying
* the firmware API for the appropriate message. For counters the driver
* does read, the driver should accumulate the counters.
*/
-#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
-#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
/* rx mpdu counter, full 32 bits */
-#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
/* aka CCA_NAV_TX_TIME */
-#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
-#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
-#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
-#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
/* tx ampdu cnt, full 32 bits */
-#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
-#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
/* counts all mpdus in ampdu, regardless of success */
-#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
/* counts all successfully tx'd mpdus in ampdu */
-#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
/* in units of 'us' */
-#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
+#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
/* units are us */
-#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
+#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
+#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
+#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
/* rx ampdu count, 32-bit */
-#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
/* rx ampdu bytes count, 32-bit */
-#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
/* rx ampdu valid subframe count */
-#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
/* rx ampdu valid subframe bytes count, 32bits */
-#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
/* remaining windows protected stats */
-#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
-#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
-#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
/* rx blockack count, 32 bits */
-#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
-#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
-#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
-#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT_MASK_MT7916 GENMASK(31, 16)
-#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
+#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
/* 36, 37 both DNR */
-#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
-#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
-#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
-#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
-#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, 0x104 + ((n) << 4))
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
-#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, 0x0a8 + ((n) << 2))
-#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, 0x164 + ((n) << 2))
-#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x4b8 + ((n) << 2))
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
+#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
+ ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
+ ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
+ ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
-#define MT_WTBLON_TOP_BASE 0x34000
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
-#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x0)
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
-#define MT_WTBL_UPDATE MT_WTBLON_TOP(0x030)
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_BASE 0x38000
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
- FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
- FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
-#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, 0x06c + (_n) * 4)
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
+ (_n) * 4))
#define MT_AGG_PCR0_MM_PROT BIT(0)
#define MT_AGG_PCR0_GF_PROT BIT(1)
#define MT_AGG_PCR0_BW20_PROT BIT(2)
#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x084)
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
-#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, 0x098)
-#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
-#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
-#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
-#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, 0x0f0)
-#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
-/* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
-#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
#define MT_ARB_SCR_TX_DISABLE BIT(8)
#define MT_ARB_SCR_RX_DISABLE BIT(9)
-#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
+ (_n) * 4))
-/* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
-
-#define MT_RX_DATA_RING_BASE MT_WFDMA0(0x500)
-
-#define MT_WFDMA0_RX_RING0_EXT_CTRL MT_WFDMA0(0x680)
-#define MT_WFDMA0_RX_RING1_EXT_CTRL MT_WFDMA0(0x684)
-#define MT_WFDMA0_RX_RING2_EXT_CTRL MT_WFDMA0(0x688)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
/* WFDMA1 */
#define MT_WFDMA1_BASE 0xd5000
#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_MCU_CMD MT_WFDMA1(0x1f0)
-#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
-#define MT_MCU_CMD_STOP_DMA BIT(2)
-#define MT_MCU_CMD_RESET_DONE BIT(3)
-#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
-#define MT_MCU_CMD_NORMAL_STATE BIT(5)
-#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-
#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
-#define MT_TX_RING_BASE MT_WFDMA1(0x300)
-#define MT_RX_EVENT_RING_BASE MT_WFDMA1(0x500)
-
-#define MT_WFDMA1_TX_RING0_EXT_CTRL MT_WFDMA1(0x600)
-#define MT_WFDMA1_TX_RING1_EXT_CTRL MT_WFDMA1(0x604)
-#define MT_WFDMA1_TX_RING2_EXT_CTRL MT_WFDMA1(0x608)
-#define MT_WFDMA1_TX_RING3_EXT_CTRL MT_WFDMA1(0x60c)
-#define MT_WFDMA1_TX_RING4_EXT_CTRL MT_WFDMA1(0x610)
-#define MT_WFDMA1_TX_RING5_EXT_CTRL MT_WFDMA1(0x614)
-#define MT_WFDMA1_TX_RING6_EXT_CTRL MT_WFDMA1(0x618)
-#define MT_WFDMA1_TX_RING7_EXT_CTRL MT_WFDMA1(0x61c)
-
-#define MT_WFDMA1_TX_RING16_EXT_CTRL MT_WFDMA1(0x640)
-#define MT_WFDMA1_TX_RING17_EXT_CTRL MT_WFDMA1(0x644)
-#define MT_WFDMA1_TX_RING18_EXT_CTRL MT_WFDMA1(0x648)
-#define MT_WFDMA1_TX_RING19_EXT_CTRL MT_WFDMA1(0x64c)
-#define MT_WFDMA1_TX_RING20_EXT_CTRL MT_WFDMA1(0x650)
-#define MT_WFDMA1_TX_RING21_EXT_CTRL MT_WFDMA1(0x654)
-#define MT_WFDMA1_TX_RING22_EXT_CTRL MT_WFDMA1(0x658)
-#define MT_WFDMA1_TX_RING23_EXT_CTRL MT_WFDMA1(0x65c)
-
-#define MT_WFDMA1_RX_RING0_EXT_CTRL MT_WFDMA1(0x680)
-#define MT_WFDMA1_RX_RING1_EXT_CTRL MT_WFDMA1(0x684)
-#define MT_WFDMA1_RX_RING2_EXT_CTRL MT_WFDMA1(0x688)
-#define MT_WFDMA1_RX_RING3_EXT_CTRL MT_WFDMA1(0x68c)
-
/* WFDMA CSR */
#define MT_WFDMA_EXT_CSR_BASE 0xd7000
#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
-#define MT_INT_SOURCE_CSR MT_WFDMA_EXT_CSR(0x10)
-#define MT_INT_MASK_CSR MT_WFDMA_EXT_CSR(0x14)
-#define MT_INT_RX_DONE_DATA0 BIT(16)
-#define MT_INT_RX_DONE_DATA1 BIT(17)
-#define MT_INT_RX_DONE_WM BIT(0)
-#define MT_INT_RX_DONE_WA BIT(1)
-#define MT_INT_RX_DONE_WA_EXT BIT(2)
-#define MT_INT_RX_DONE_ALL (GENMASK(2, 0) | GENMASK(17, 16))
-#define MT_INT_TX_DONE_MCU_WA BIT(15)
-#define MT_INT_TX_DONE_FWDL BIT(26)
-#define MT_INT_TX_DONE_MCU_WM BIT(27)
-#define MT_INT_TX_DONE_BAND0 BIT(30)
-#define MT_INT_TX_DONE_BAND1 BIT(31)
-
-#define MT_INT_BAND1_MASK (MT_INT_RX_DONE_WA_EXT | \
- MT_INT_TX_DONE_BAND1)
-
-#define MT_INT_MCU_CMD BIT(29)
-
-#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
- MT_INT_TX_DONE_MCU_WM | \
- MT_INT_TX_DONE_FWDL)
-
#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR(0x30)
#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
-#define MT_INT1_SOURCE_CSR MT_WFDMA_EXT_CSR(0x88)
-#define MT_INT1_MASK_CSR MT_WFDMA_EXT_CSR(0x8c)
-
-#define MT_PCIE_RECOG_ID MT_WFDMA_EXT_CSR(0x90)
+#define MT_PCIE_RECOG_ID 0xd7090
#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
#define MT_PCIE_RECOG_ID_SEM BIT(31)
/* WFDMA0 PCIE1 */
-#define MT_WFDMA0_PCIE1_BASE 0xd8000
-#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA0_PCIE1_BASE 0xd8000
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
-#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
/* WFDMA1 PCIE1 */
-#define MT_WFDMA1_PCIE1_BASE 0xd9000
-#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
-#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
-#define MT_TOP_RGU_BASE 0xf0000
-#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
-#define MT_TOP_PWR_KEY (0x5746 << 16)
-#define MT_TOP_PWR_SW_RST BIT(0)
-#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
-#define MT_TOP_PWR_HW_CTRL BIT(4)
-#define MT_TOP_PWR_PWR_ON BIT(7)
+/* WFDMA COMMON */
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + __MT_RXQ_MAX)
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_EXT_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
+
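These generic queue macros replace the long list of hard-coded ring defines removed above; the queue id and the WFDMA engine it lives on are now data (dev->q_id[], dev->wfdma_mask) rather than separate defines. A worked example: a TX queue with id 16 on WFDMA1 reproduces the old MT_WFDMA1_TX_RING16_EXT_CTRL address:

	/* Illustration only: 0xd5000 + 0x600 + 16 * 0x4 == 0xd5640, the value
	 * of the removed MT_WFDMA1_TX_RING16_EXT_CTRL.
	 */
	static u32 txq_ext_ctrl_sketch(u32 wfdma_base, u32 qid)
	{
		return wfdma_base + 0x600 + qid * 0x4;
	}
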
+#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
+#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
+
+#define MT_INT_RX_DONE_BAND0 BIT(16)
+#define MT_INT_RX_DONE_BAND1 BIT(17)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(1)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
+#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
+#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_EXT) | \
+ MT_INT_RX(MT_RXQ_EXT_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE)
+
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
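+
MT_INT_TX_DONE_MCU is now assembled from the per-device q_int_mask table, so a single definition serves both generations. A sketch with mt7915-style values (the real assignments live in the dma setup code, so treat these as assumptions) that reproduces the removed fixed composite:

	/* Hypothetical init values, illustration only (mt7916 uses
	 * MT_INT_TX_DONE_MCU_WA_MT7916 for the WA queue instead).
	 */
	dev->q_int_mask[MT_MCUQ_WM]   = MT_INT_TX_DONE_MCU_WM;	/* BIT(27) */
	dev->q_int_mask[MT_MCUQ_WA]   = MT_INT_TX_DONE_MCU_WA;	/* BIT(15) */
	dev->q_int_mask[MT_MCUQ_FWDL] = MT_INT_TX_DONE_FWDL;	/* BIT(26) */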
-#define MT_INFRA_CFG_BASE 0xf1000
-#define MT_INFRA(ofs) (MT_INFRA_CFG_BASE + (ofs))
+#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
-#define MT_HIF_REMAP_L1 MT_INFRA(0x1ac)
+/* TOP RGU */
+#define MT_TOP_RGU_BASE 0x18000000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0xf11ac
+#define MT_HIF_REMAP_L1_MT7916 0xfe260
#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
#define MT_HIF_REMAP_BASE_L1 0xe0000
-#define MT_HIF_REMAP_L2 MT_INFRA(0x1b0)
+#define MT_HIF_REMAP_L2 0xf11b0
#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
-#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_HIF_REMAP_L2_MT7916 0x1b8
+#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END 0x7fffffff
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_CBTOP2_PHY_END 0xffffffff
+
+/* FW MODE SYNC */
+#define MT_SWDEF_MODE 0x41f23c
+#define MT_SWDEF_MODE_MT7916 0x41143c
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
#define MT_DIC_CMD_REG_BASE 0x41f000
#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
-#define MT_SWDEF_BASE 0x41f200
-#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
-#define MT_SWDEF_MODE MT_SWDEF(0x3c)
-#define MT_SWDEF_NORMAL_MODE 0
-#define MT_SWDEF_ICAP_MODE 1
-#define MT_SWDEF_SPECTRUM_MODE 2
-
+/* LED */
#define MT_LED_TOP_BASE 0x18013000
#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
+/* MT TOP */
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
-#define MT_TOP_LPCR_HOST_BAND0 MT_TOP(0x10)
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
-#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
-
#define MT_HW_BOUND 0x70010020
-#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
-#define MT_PCIE1_MAC_BASE 0x74020000
-#define MT_PCIE1_MAC(ofs) (MT_PCIE1_MAC_BASE + (ofs))
-#define MT_PCIE1_MAC_INT_ENABLE MT_PCIE1_MAC(0x188)
-
+/* PCIE MAC */
#define MT_PCIE_MAC_BASE 0x74030000
#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
+#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+
+/* PP TOP */
+#define MT_WF_PP_TOP_BASE 0x820cc000
+#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
+
+#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
+#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
+
#define MT_WF_IRPI_BASE 0x83006000
#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + ((ofs) << 16))
#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
-#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
#define MT_MCU_WM_CIRQ_BASE 0x89010000
#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
u32 band[2];
};
-#define REG_BAND(_reg) \
- { .band[0] = MT_##_reg(0), .band[1] = MT_##_reg(1) }
-#define REG_BAND_IDX(_reg, _idx) \
- { .band[0] = MT_##_reg(0, _idx), .band[1] = MT_##_reg(1, _idx) }
-
-static const struct reg_band reg_backup_list[] = {
- REG_BAND_IDX(AGG_PCR0, 0),
- REG_BAND_IDX(AGG_PCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 0),
- REG_BAND_IDX(AGG_AWSCR0, 1),
- REG_BAND_IDX(AGG_AWSCR0, 2),
- REG_BAND_IDX(AGG_AWSCR0, 3),
- REG_BAND(AGG_MRCR),
- REG_BAND(TMAC_TFCR0),
- REG_BAND(TMAC_TCR0),
- REG_BAND(AGG_ATCR1),
- REG_BAND(AGG_ATCR3),
- REG_BAND(TMAC_TRCR0),
- REG_BAND(TMAC_ICR0),
- REG_BAND_IDX(ARB_DRNGR0, 0),
- REG_BAND_IDX(ARB_DRNGR0, 1),
- REG_BAND(WF_RFCR),
- REG_BAND(WF_RFCR1),
-};
+#define REG_BAND(_list, _reg) \
+ { _list.band[0] = MT_##_reg(0); \
+ _list.band[1] = MT_##_reg(1); }
+#define REG_BAND_IDX(_list, _reg, _idx) \
+ { _list.band[0] = MT_##_reg(0, _idx); \
+ _list.band[1] = MT_##_reg(1, _idx); }
+
+#define TM_REG_MAX_ID 17
+static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+
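For clarity, this is how the reworked statement macros expand: REG_BAND() token-pastes the register name against the per-band accessors already used in this driver, so one expansion looks like the sketch below.

	/* REG_BAND(reg_backup_list[6], AGG_MRCR) expands to two runtime
	 * assignments, one per band, instead of a static initializer:
	 *
	 *   { reg_backup_list[6].band[0] = MT_AGG_MRCR(0);
	 *     reg_backup_list[6].band[1] = MT_AGG_MRCR(1); }
	 *
	 * REG_BAND_IDX() does the same with an extra register index argument.
	 */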
static int
mt7915_tm_set_tx_power(struct mt7915_phy *phy)
mt7915_tm_set_slot_time(phy, slot_time, sifs);
return mt7915_tm_set_wmm_qid(dev,
- mt7915_lmac_mapping(dev, IEEE80211_AC_BE),
+ mt76_connac_lmac_mapping(IEEE80211_AC_BE),
aifsn, cw, cw, 0);
}
u32 *b = phy->test.reg_backup;
int i;
+ REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
+ REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
+ REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
+ REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
+ REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
+ REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
+ REG_BAND(reg_backup_list[6], AGG_MRCR);
+ REG_BAND(reg_backup_list[7], TMAC_TFCR0);
+ REG_BAND(reg_backup_list[8], TMAC_TCR0);
+ REG_BAND(reg_backup_list[9], AGG_ATCR1);
+ REG_BAND(reg_backup_list[10], AGG_ATCR3);
+ REG_BAND(reg_backup_list[11], TMAC_TRCR0);
+ REG_BAND(reg_backup_list[12], TMAC_ICR0);
+ REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
+ REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
+ REG_BAND(reg_backup_list[15], WF_RFCR);
+ REG_BAND(reg_backup_list[16], WF_RFCR1);
+
if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
for (i = 0; i < n_regs; i++)
mt76_wr(dev, reg_backup_list[i].band[ext_phy], b[i]);
void *rx, *rssi;
u16 fcs_err;
int i;
+ u32 cnt;
rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
if (!rx)
nla_nest_end(msg, rx);
- fcs_err = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
- MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR3(ext_phy));
+ fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
q = ext_phy ? MT_RXQ_EXT : MT_RXQ_MAIN;
mphy->test.rx_stats.packets[q] += fcs_err;
mphy->test.rx_stats.fcs_error[q] += fcs_err;
return 0;
}
-static void
-mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt7921_dev *dev = priv;
-
- mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
-}
-
static int
mt7921_pm_set(void *data, u64 val)
{
mutex_lock(&dev->mt76.mutex);
- if (val == pm->enable)
+ if (val == pm->enable_user)
goto out;
- if (!pm->enable) {
+ if (!pm->enable_user) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
pm->enable = false;
mt76_connac_pm_wake(&dev->mphy, pm);
- ieee80211_iterate_active_interfaces(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_pm_interface_iter, dev);
-
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
-
- pm->enable = val;
+ pm->enable_user = val;
+ mt7921_set_runtime_pm(dev);
mt76_connac_power_save_sched(&dev->mphy, pm);
out:
mutex_unlock(&dev->mt76.mutex);
{
struct mt7921_dev *dev = data;
- *val = dev->pm.enable;
+ *val = dev->pm.enable_user;
return 0;
}
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(dev->mphy.hw->conf.flags & IEEE80211_CONF_MONITOR);
bool enable = !!val;
mt7921_mutex_acquire(dev);
- if (pm->ds_enable != enable) {
- mt76_connac_mcu_set_deep_sleep(&dev->mt76, enable);
- pm->ds_enable = enable;
- }
+ if (pm->ds_enable_user == enable)
+ goto out;
+
+ pm->ds_enable_user = enable;
+ pm->ds_enable = enable && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+out:
mt7921_mutex_release(dev);
return 0;
{
struct mt7921_dev *dev = data;
- *val = dev->pm.ds_enable;
+ *val = dev->pm.ds_enable_user;
return 0;
}
mt76_wr(dev, MT_WFDMA0_TX_RING17_EXT_CTRL, PREFETCH(0x380, 0x4));
}
-static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
-{
- static const struct {
- u32 phys;
- u32 mapped;
- u32 size;
- } fixed_map[] = {
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
- { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
- { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
- { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
- { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
- { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
- { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
- { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
- { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
- { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
- { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
- { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
- { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
- { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
- { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
- { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
- { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
- { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
- { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
- { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
- { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
- { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
- { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
- { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
- { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
- { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
- { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
- };
- int i;
-
- if (addr < 0x100000)
- return addr;
-
- for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
- u32 ofs;
-
- if (addr < fixed_map[i].phys)
- continue;
-
- ofs = addr - fixed_map[i].phys;
- if (ofs > fixed_map[i].size)
- continue;
-
- return fixed_map[i].mapped + ofs;
- }
-
- if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
- return mt7921_reg_map_l1(dev, addr);
-
- dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
- addr);
-
- return 0;
-}
-
-static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rr(mdev, addr);
-}
-
-static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- dev->bus_ops->wr(mdev, addr, val);
-}
-
-static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- u32 addr = __mt7921_reg_addr(dev, offset);
-
- return dev->bus_ops->rmw(mdev, addr, mask, val);
-}
-
static int mt7921_dma_disable(struct mt7921_dev *dev, bool force)
{
if (force) {
int mt7921_dma_init(struct mt7921_dev *dev)
{
- struct mt76_bus_ops *bus_ops;
int ret;
- dev->phy.dev = dev;
- dev->phy.mt76 = &dev->mt76.phy;
- dev->mt76.phy.priv = &dev->phy;
- dev->bus_ops = dev->mt76.bus;
- bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
- GFP_KERNEL);
- if (!bus_ops)
- return -ENOMEM;
-
- bus_ops->rr = mt7921_rr;
- bus_ops->wr = mt7921_wr;
- bus_ops->rmw = mt7921_rmw;
- dev->mt76.bus = bus_ops;
-
mt76_dma_attach(&dev->mt76);
ret = mt7921_dma_disable(dev, true);
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
-
- /* TODO: mt7921s run sleep mode on default */
- if (mt76_is_mmio(&dev->mt76)) {
- dev->pm.enable = true;
- dev->pm.ds_enable = true;
- }
+ dev->pm.enable_user = true;
+ dev->pm.enable = true;
+ dev->pm.ds_enable_user = true;
+ dev->pm.ds_enable = true;
if (mt76_is_sdio(&dev->mt76))
hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u8 q = mt7921_lmac_mapping(dev, i);
+ u8 q = mt76_connac_lmac_mapping(i);
u32 tx_cur = tx_time[q];
u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
static int mt7921_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
struct mt7921_sta *msta = (struct mt7921_sta *)status->wcid;
+ __le32 *rxd = (__le32 *)skb->data;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct ieee80211_hdr hdr;
- struct ethhdr eth_hdr;
- __le32 *rxd = (__le32 *)skb->data;
__le32 qos_ctrl, ht_ctrl;
if (FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, le32_to_cpu(rxd[3])) !=
vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
/* store the info from RXD and ethhdr to avoid being overridden */
- memcpy(ð_hdr, skb->data + hdr_gap, sizeof(eth_hdr));
hdr.frame_control = FIELD_GET(MT_RXD6_FRAME_CONTROL, rxd[6]);
hdr.seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, rxd[8]);
qos_ctrl = FIELD_GET(MT_RXD8_QOS_CTL, rxd[8]);
ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
break;
case IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_source);
break;
case IEEE80211_FCTL_TODS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
break;
case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
- ether_addr_copy(hdr.addr3, eth_hdr.h_dest);
- ether_addr_copy(hdr.addr4, eth_hdr.h_source);
+ ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
+ ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
break;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
- if (eth_hdr.h_proto == htons(ETH_P_AARP) ||
- eth_hdr.h_proto == htons(ETH_P_IPX))
+ if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
+ eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
- else if (eth_hdr.h_proto >= htons(ETH_P_802_3_MIN))
+ else if (eth_hdr->h_proto >= cpu_to_be16(ETH_P_802_3_MIN))
ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
else
skb_pull(skb, 2);
} else {
p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
- mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
+ mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
break;
case MT_PHY_TYPE_HT:
case MT_PHY_TYPE_HT_GF:
- rate.mcs += (rate.nss - 1) * 8;
if (rate.mcs > 31)
goto out;
out:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(mt7921_mac_add_txs);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
delta = dev->pm.idle_timeout;
if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
- test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+ test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
+ dev->fw_assert)
+ goto out;
+
+ if (mutex_is_locked(&dev->mt76.mutex))
+		/* If the mt76 mutex is held we should not put the device
+		 * to sleep, since we are currently accessing the device
+		 * register map. Wait for the next power_save trigger
+		 * instead.
+		 */
goto out;
if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
cancel_delayed_work_sync(&dev->pm.ps_work);
cancel_work_sync(&dev->pm.wake_work);
+ cancel_work_sync(&dev->reset_work);
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt7921_mutex_acquire(dev);
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7921_mcu_add_key(dev, vif, msta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7921_mcu_add_key(dev, vif, mvif->wep_sta, key, cmd);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif,
+ &mvif->wep_sta->bip,
+ key, MCU_UNI_CMD(STA_REC_UPDATE),
+ &mvif->wep_sta->wcid, cmd);
out:
mt7921_mutex_release(dev);
return err;
}
+static void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+
+ mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
+}
+
+void mt7921_set_runtime_pm(struct mt7921_dev *dev)
+{
+ struct ieee80211_hw *hw = dev->mphy.hw;
+ struct mt76_connac_pm *pm = &dev->pm;
+ bool monitor = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+
+ pm->enable = pm->enable_user && !monitor;
+ ieee80211_iterate_active_interfaces(hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_pm_interface_iter, dev);
+ pm->ds_enable = pm->ds_enable_user && !monitor;
+ mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+}
+
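The resulting power-save states can be read straight off mt7921_set_runtime_pm(); summarized here (derived from the code above, both flags follow the same rule):

	/*
	 * enable_user/ds_enable_user   monitor mode   pm->enable/pm->ds_enable
	 * false                        any            false
	 * true                         true           false (monitor needs full power)
	 * true                         false          true
	 */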
static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
mt76_rmw_field(dev, MT_DMA_DCR0(0), MT_DMA_DCR0_RXD_G5_EN,
enabled);
mt76_wr(dev, MT_WF_RFCR(0), phy->rxfilter);
+ mt7921_set_runtime_pm(dev);
}
out:
mt7921_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
/* no need to update right away, we'll get BSS_CHANGED_QOS */
- queue = mt7921_lmac_mapping(dev, queue);
+ queue = mt76_connac_lmac_mapping(queue);
mvif->queue_params[queue] = *params;
return 0;
#define MT_STA_BFER BIT(0)
#define MT_STA_BFEE BIT(1)
-#define FW_FEATURE_SET_ENCRYPT BIT(0)
-#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
-#define FW_FEATURE_ENCRY_MODE BIT(4)
-#define FW_FEATURE_OVERRIDE_ADDR BIT(5)
-
-#define DL_MODE_ENCRYPT BIT(0)
-#define DL_MODE_KEY_IDX GENMASK(2, 1)
-#define DL_MODE_RESET_SEC_IV BIT(3)
-#define DL_MODE_WORKING_PDA_CR4 BIT(4)
-#define DL_CONFIG_ENCRY_MODE_SEL BIT(6)
-#define DL_MODE_NEED_RSP BIT(31)
-
-#define FW_START_OVERRIDE BIT(0)
-#define FW_START_WORKING_PDA_CR4 BIT(2)
-
-#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
-#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
-#define PATCH_SEC_TYPE_INFO 0x2
-
#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
#define PATCH_SEC_ENC_TYPE_AES 0x01
#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
-static enum mcu_cipher_type
-mt7921_mcu_get_cipher(int cipher)
-{
- switch (cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- return MCU_CIPHER_WEP40;
- case WLAN_CIPHER_SUITE_WEP104:
- return MCU_CIPHER_WEP104;
- case WLAN_CIPHER_SUITE_TKIP:
- return MCU_CIPHER_TKIP;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- return MCU_CIPHER_BIP_CMAC_128;
- case WLAN_CIPHER_SUITE_CCMP:
- return MCU_CIPHER_AES_CCMP;
- case WLAN_CIPHER_SUITE_CCMP_256:
- return MCU_CIPHER_CCMP_256;
- case WLAN_CIPHER_SUITE_GCMP:
- return MCU_CIPHER_GCMP;
- case WLAN_CIPHER_SUITE_GCMP_256:
- return MCU_CIPHER_GCMP_256;
- case WLAN_CIPHER_SUITE_SMS4:
- return MCU_CIPHER_WAPI;
- default:
- return MCU_CIPHER_NONE;
- }
-}
-
-static u8 mt7921_mcu_chan_bw(struct cfg80211_chan_def *chandef)
-{
- static const u8 width_to_bw[] = {
- [NL80211_CHAN_WIDTH_40] = CMD_CBW_40MHZ,
- [NL80211_CHAN_WIDTH_80] = CMD_CBW_80MHZ,
- [NL80211_CHAN_WIDTH_80P80] = CMD_CBW_8080MHZ,
- [NL80211_CHAN_WIDTH_160] = CMD_CBW_160MHZ,
- [NL80211_CHAN_WIDTH_5] = CMD_CBW_5MHZ,
- [NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
- [NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
- [NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
- };
-
- if (chandef->width >= ARRAY_SIZE(width_to_bw))
- return 0;
-
- return width_to_bw[chandef->width];
-}
-
static int
mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
{
}
/** starec & wtbl **/
-static int
-mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
- struct ieee80211_key_conf *key, enum set_key_cmd cmd)
-{
- struct mt7921_sta_key_conf *bip = &msta->bip;
- struct sta_rec_sec *sec;
- struct tlv *tlv;
- u32 len = sizeof(*sec);
-
- tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V2, sizeof(*sec));
-
- sec = (struct sta_rec_sec *)tlv;
- sec->add = cmd;
-
- if (cmd == SET_KEY) {
- struct sec_key *sec_key;
- u8 cipher;
-
- cipher = mt7921_mcu_get_cipher(key->cipher);
- if (cipher == MCU_CIPHER_NONE)
- return -EOPNOTSUPP;
-
- sec_key = &sec->key[0];
- sec_key->cipher_len = sizeof(*sec_key);
-
- if (cipher == MCU_CIPHER_BIP_CMAC_128) {
- sec_key->cipher_id = MCU_CIPHER_AES_CCMP;
- sec_key->key_id = bip->keyidx;
- sec_key->key_len = 16;
- memcpy(sec_key->key, bip->key, 16);
-
- sec_key = &sec->key[1];
- sec_key->cipher_id = MCU_CIPHER_BIP_CMAC_128;
- sec_key->cipher_len = sizeof(*sec_key);
- sec_key->key_len = 16;
- memcpy(sec_key->key, key->key, 16);
-
- sec->n_cipher = 2;
- } else {
- sec_key->cipher_id = cipher;
- sec_key->key_id = key->keyidx;
- sec_key->key_len = key->keylen;
- memcpy(sec_key->key, key->key, key->keylen);
-
- if (cipher == MCU_CIPHER_TKIP) {
- /* Rx/Tx MIC keys are swapped */
- memcpy(sec_key->key + 16, key->key + 24, 8);
- memcpy(sec_key->key + 24, key->key + 16, 8);
- }
-
- /* store key_conf for BIP batch update */
- if (cipher == MCU_CIPHER_AES_CCMP) {
- memcpy(bip->key, key->key, key->keylen);
- bip->keyidx = key->keyidx;
- }
-
- len -= sizeof(*sec_key);
- sec->n_cipher = 1;
- }
- } else {
- len -= sizeof(sec->key);
- sec->n_cipher = 0;
- }
- sec->len = cpu_to_le16(len);
-
- return 0;
-}
-
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
-{
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- ret = mt7921_mcu_sta_key_tlv(msta, skb, key, cmd);
- if (ret)
- return ret;
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(STA_REC_UPDATE), true);
-}
-
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable)
msta->wcid.amsdu = false;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
struct mt7921_sta *msta = (struct mt7921_sta *)params->sta->drv_priv;
return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
-int mt7921_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 power_mode;
- u8 rsv[3];
- } req = {
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD(NIC_POWER_CTRL), &req,
- sizeof(req), false);
-}
-EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
-
static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
u32 mode = DL_MODE_NEED_RSP;
if (mt76_is_sdio(&dev->mt76)) {
/* activate again */
ret = __mt7921_mcu_fw_pmctrl(dev);
- if (ret)
- return ret;
-
- ret = __mt7921_mcu_drv_pmctrl(dev);
- if (ret)
- return ret;
+ if (!ret)
+ ret = __mt7921_mcu_drv_pmctrl(dev);
}
out:
return ret;
}
-static u32 mt7921_mcu_gen_dl_mode(u8 feature_set, bool is_wa)
-{
- u32 ret = 0;
-
- ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
- (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
- ret |= (feature_set & FW_FEATURE_ENCRY_MODE) ?
- DL_CONFIG_ENCRY_MODE_SEL : 0;
- ret |= FIELD_PREP(DL_MODE_KEY_IDX,
- FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
- ret |= DL_MODE_NEED_RSP;
- ret |= is_wa ? DL_MODE_WORKING_PDA_CR4 : 0;
-
- return ret;
-}
-
static int
mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
const struct mt7921_fw_trailer *hdr,
region = (const struct mt7921_fw_region *)((const u8 *)hdr -
(hdr->n_region - i) * sizeof(*region));
- mode = mt7921_mcu_gen_dl_mode(region->feature_set, is_wa);
+ mode = mt76_connac_mcu_gen_dl_mode(&dev->mt76,
+ region->feature_set, is_wa);
len = le32_to_cpu(region->len);
addr = le32_to_cpu(region->addr);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
-#define WMM_AIFS_SET BIT(0)
-#define WMM_CW_MIN_SET BIT(1)
-#define WMM_CW_MAX_SET BIT(2)
-#define WMM_TXOP_SET BIT(3)
-#define WMM_PARAM_SET GENMASK(3, 0)
-#define TX_CMD_MODE 1
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct edca {
- u8 queue;
- u8 set;
- u8 aifs;
- u8 cw_min;
+ __le16 cw_min;
__le16 cw_max;
__le16 txop;
- };
+ __le16 aifs;
+ u8 guardtime;
+ u8 acm;
+ } __packed;
struct mt7921_mcu_tx {
- u8 total;
- u8 action;
- u8 valid;
- u8 mode;
-
struct edca edca[IEEE80211_NUM_ACS];
+ u8 bss_idx;
+ u8 qos;
+ u8 wmm_idx;
+ u8 pad;
} __packed req = {
- .valid = true,
- .mode = TX_CMD_MODE,
- .total = IEEE80211_NUM_ACS,
+ .bss_idx = mvif->mt76.idx,
+ .qos = vif->bss_conf.qos,
+ .wmm_idx = mvif->mt76.wmm_idx,
};
- struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mu_edca {
u8 cw_min;
u8 cw_max;
.qos = vif->bss_conf.qos,
.wmm_idx = mvif->mt76.wmm_idx,
};
+ static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
- struct edca *e = &req.edca[ac];
+ struct edca *e = &req.edca[to_aci[ac]];
- e->set = WMM_PARAM_SET;
- e->queue = ac + mvif->mt76.wmm_idx * MT7921_MAX_WMM_SETS;
- e->aifs = q->aifs;
+ e->aifs = cpu_to_le16(q->aifs);
e->txop = cpu_to_le16(q->txop);
if (q->cw_min)
- e->cw_min = fls(q->cw_min);
+ e->cw_min = cpu_to_le16(q->cw_min);
else
- e->cw_min = 5;
+ e->cw_min = cpu_to_le16(5);
if (q->cw_max)
- e->cw_max = cpu_to_le16(fls(q->cw_max));
+ e->cw_max = cpu_to_le16(q->cw_max);
else
e->cw_max = cpu_to_le16(10);
}
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE),
- &req, sizeof(req), true);
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
+ sizeof(req), false);
if (ret)
return ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- int to_aci[] = {1, 0, 2, 3};
if (!mvif->queue_params[ac].mu_edca)
break;
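The reordering into req.edca[] follows directly from to_aci[] = { 1, 0, 2, 3 } indexed by the mac80211 AC number; spelled out:

	/* IEEE80211_AC_VO (0) -> req.edca[1]
	 * IEEE80211_AC_VI (1) -> req.edca[0]
	 * IEEE80211_AC_BE (2) -> req.edca[2]
	 * IEEE80211_AC_BK (3) -> req.edca[3]
	 */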
} __packed req = {
.control_ch = chandef->chan->hw_value,
.center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt7921_mcu_chan_bw(chandef),
+ .bw = mt76_connac_chan_bw(chandef),
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
-{
- struct mt7921_mcu_eeprom_info req = {
- .addr = cpu_to_le32(round_down(offset, 16)),
- };
- struct mt7921_mcu_eeprom_info *res;
- struct sk_buff *skb;
- int ret;
- u8 *buf;
-
- ret = mt76_mcu_send_and_get_msg(&dev->mt76,
- MCU_EXT_QUERY(EFUSE_ACCESS),
- &req, sizeof(req), true, &skb);
- if (ret)
- return ret;
-
- res = (struct mt7921_mcu_eeprom_info *)skb->data;
- buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
- memcpy(buf, res->data, 16);
- dev_kfree_skb(skb);
-
- return 0;
-}
-
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
MT7921_RXQ_MCU_WM = 0,
};
-struct mt7921_sta_key_conf {
- s8 keyidx;
- u8 key[16];
-};
-
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
unsigned long ampdu_state;
struct mt76_sta_stats stats;
- struct mt7921_sta_key_conf bip;
+ struct mt76_connac_sta_key_conf bip;
};
DECLARE_EWMA(rssi, 10, 8);
#define mt7921_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-static inline u8 mt7921_lmac_mapping(struct mt7921_dev *dev, u8 ac)
-{
- /* LMAC uses the reverse order of mac80211 AC indexes */
- return 3 - ac;
-}
-
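The removed inline moves to the shared connac layer; a minimal sketch of the replacement helper, assuming it keeps the 3 - ac semantics noted in the deleted comment:

	static inline u8 mt76_connac_lmac_mapping(u8 ac)
	{
		/* LMAC uses the reverse order of mac80211 AC indexes:
		 * VO(0)/VI(1)/BE(2)/BK(3) map to LMAC queues 3/2/1/0
		 */
		return 3 - ac;
	}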
extern const struct ieee80211_ops mt7921_ops;
extern struct pci_driver mt7921_pci_driver;
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
-int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- struct mt7921_sta *msta, struct ieee80211_key_conf *key,
- enum set_key_cmd cmd);
int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif);
int mt7921_mcu_set_eeprom(struct mt7921_dev *dev);
-int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset);
int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
int cmd, int *wait_seq);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
-int mt7921_mcu_restart(struct mt76_dev *dev);
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
int mt7921e_driver_own(struct mt7921_dev *dev);
int mt7921s_wfsys_reset(struct mt7921_dev *dev);
int mt7921s_mac_reset(struct mt7921_dev *dev);
int mt7921s_init_reset(struct mt7921_dev *dev);
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data);
+void mt7921_set_runtime_pm(struct mt7921_dev *dev);
#endif
mt76_free_device(&dev->mt76);
}
+static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7921_reg_map_l1(dev, addr);
+
+ dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
+ addr);
+
+ return 0;
+}
+
+static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ u32 addr = __mt7921_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
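One worked translation through the fixed map above (addresses taken from the table; the register name is only illustrative):

	/* A read of physical address 0x820e4010 (a WF_TMAC register) matches
	 * the { 0x820e4000, 0x21000, 0x0400 } entry, so mt7921_rr() issues it
	 * at bus offset 0x21000 + (0x820e4010 - 0x820e4000) = 0x21010.
	 * Offsets below 0x100000 are already bus offsets and pass through,
	 * and the CONN_INFRA/CBTOP ranges fall back to mt7921_reg_map_l1().
	 */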
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
.token_size = MT7921_TOKEN_SIZE,
.tx_prepare_skb = mt7921e_tx_prepare_skb,
.tx_complete_skb = mt7921e_tx_complete_skb,
+ .rx_check = mt7921e_rx_check,
.rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.fw_own = mt7921e_mcu_fw_pmctrl,
};
+ struct mt76_bus_ops *bus_ops;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7921_rr;
+ bus_ops->wr = mt7921_wr;
+ bus_ops->rmw = mt7921_rmw;
+ dev->mt76.bus = bus_ops;
+
+ ret = __mt7921e_mcu_drv_pmctrl(dev);
+ if (ret)
+ return ret;
+
mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
}
static void
-mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
+mt7921e_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
+ struct mt7921_tx_free *free = (struct mt7921_tx_free *)data;
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
struct ieee80211_sta *sta = NULL;
+ struct sk_buff *skb, *tmp;
+ void *end = data + len;
LIST_HEAD(free_list);
- struct sk_buff *tmp;
bool wake = false;
u8 i, count;
* Should avoid accessing WTBL to get Tx airtime, and use it instead.
*/
count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ if (WARN_ON_ONCE((void *)&free->info[count] > end))
+ return;
+
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
u8 stat;
if (wake)
mt76_set_tx_blocked(&dev->mt76, false);
- napi_consume_skb(skb, 1);
-
list_for_each_entry_safe(skb, tmp, &free_list, list) {
skb_list_del_init(skb);
napi_consume_skb(skb, 1);
mt76_worker_schedule(&dev->mt76.tx_worker);
}
+bool mt7921e_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7921e_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ return false;
+ default:
+ return true;
+ }
+}
+
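The TXS branch assumes the layout below, derived from the loop bounds (sizes in 32-bit words):

	/* | rxd[0] rxd[1] | txs record 0 (8 words) | txs record 1 (8 words) | ...
	 *
	 * rxd += 2 skips the RXD header, then each 8-word record is handed to
	 * mt7921_mac_add_txs() until fewer than 8 words remain before end.
	 */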
void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb)
{
switch (type) {
case PKT_TYPE_TXRX_NOTIFY:
- mt7921_mac_tx_free(dev, skb);
+ mt7921e_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
break;
default:
mt7921_queue_rx_skb(mdev, q, skb);
}
local_bh_enable();
+ dev->fw_assert = false;
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
.headroom = sizeof(struct mt7921_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt7921_mcu_restart,
+ .mcu_restart = mt76_connac_mcu_restart,
};
int err;
return err;
}
-int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
int i, err = 0;
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
if (i == MT7921_DRV_OWN_RETRY_COUNT) {
dev_err(dev->mt76.dev, "driver own failed\n");
err = -EIO;
- goto out;
}
+ return err;
+}
+
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err;
+
+ err = __mt7921e_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto out;
+
mt7921_wpdma_reinit_cond(dev);
clear_bit(MT76_STATE_PM, &mphy->state);
#define MT_DMASHDL_SCHED_SET(_n) MT_DMA_SHDL(0x070 + ((_n) << 2))
+#define MT_CONN_STATUS 0x7c053c10
+#define MT_WIFI_PATCH_DL_STATE BIT(0)
+
#define MT_CONN_ON_LPCTL 0x7c060010
#define PCIE_LPCR_HOST_OWN_SYNC BIT(2)
#define PCIE_LPCR_HOST_CLR_OWN BIT(1)
struct mt7921_sdio_intr *irq_data = sdio->intr_data;
int i, err;
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ sdio_release_host(sdio->func);
+
if (err < 0)
return err;
struct mt7921_dev *dev;
struct mt76_dev *mdev;
- int ret, i;
+ int ret;
mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
goto error;
}
- for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
- mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
- MT76S_XMIT_BUF_SZ,
- GFP_KERNEL);
- if (!mdev->sdio.xmit_buf[i]) {
- ret = -ENOMEM;
- goto error;
- }
- }
-
ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
if (ret)
goto error;
sdio_release_host(sdio->func);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
/* activate mt7921s again */
+ mt7921s_mcu_drv_pmctrl(dev);
+ mt76_clear(dev, MT_CONN_STATUS, MT_WIFI_PATCH_DL_STATE);
mt7921s_mcu_fw_pmctrl(dev);
mt7921s_mcu_drv_pmctrl(dev);
mt7921s_wfsys_reset(dev);
mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
mt76_worker_enable(&dev->mt76.sdio.net_worker);
dev->fw_assert = false;
- clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
mt7921s_enable_irq(&dev->mt76);
return ret;
}
+static u32 mt7921s_read_rm3r(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ return sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+}
+
+static u32 mt7921s_clear_rm3r_drv_own(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 val;
+
+ val = sdio_readl(sdio->func, MCR_D2HRM3R, NULL);
+ if (val)
+ sdio_writel(sdio->func, H2D_SW_INT_CLEAR_MAILBOX_ACK,
+ MCR_WSICR, NULL);
+
+ return val;
+}
+
int mt7921s_mcu_init(struct mt7921_dev *dev)
{
static const struct mt76_mcu_ops mt7921s_mcu_ops = {
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+
+ if (!err && test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ err = readx_poll_timeout(mt7921s_read_rm3r, dev, status,
+ status & D2HRM3R_IS_DRIVER_OWN,
+ 2000, 1000000);
+
sdio_release_host(func);
if (err < 0) {
sdio_claim_host(func);
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
+ err = readx_poll_timeout(mt7921s_clear_rm3r_drv_own,
+ dev, status,
+ !(status & D2HRM3R_IS_DRIVER_OWN),
+ 2000, 1000000);
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "mailbox ACK not cleared\n");
+ goto err;
+ }
+ }
+
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
sdio_release_host(func);
+err:
if (err < 0) {
dev_err(dev->mt76.dev, "firmware own failed\n");
clear_bit(MT76_STATE_PM, &mphy->state);
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
#include <linux/sched.h>
#include <linux/kthread.h>
const struct mt76_bus_ops *bus_ops)
{
struct mt76_sdio *sdio = &dev->sdio;
+ u32 host_max_cap;
int err;
err = mt76_worker_setup(dev->hw, &sdio->status_worker,
dev->bus = bus_ops;
dev->sdio.func = func;
- return 0;
+ host_max_cap = min_t(u32, func->card->host->max_req_size,
+ func->cur_blksize *
+ func->card->host->max_blk_count);
+ dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
+ dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
+ GFP_KERNEL);
+ if (!dev->sdio.xmit_buf)
+ err = -ENOMEM;
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
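The single bounce buffer replaces the old per-queue array and is clamped so one sdio_writesb() transfer can always carry it; a sketch of the sizing rule (host values hypothetical):

	/* host_max_cap = min(max_req_size, cur_blksize * max_blk_count)
	 * xmit_buf_sz  = min(host_max_cap, MT76S_XMIT_BUF_SZ)
	 *
	 * e.g. max_req_size = 0x20000, cur_blksize = 512, max_blk_count = 511:
	 *      host_max_cap = min(0x20000, 512 * 511) = 0x20000 bytes
	 */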
#define MCR_H2DSM0R 0x0070
#define H2D_SW_INT_READ BIT(16)
#define H2D_SW_INT_WRITE BIT(17)
+#define H2D_SW_INT_CLEAR_MAILBOX_ACK BIT(22)
#define MCR_H2DSM1R 0x0074
#define MCR_D2HRM0R 0x0078
#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
+#define D2HRM3R_IS_DRIVER_OWN BIT(0)
#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
put_page(page);
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
+ sdio_claim_host(sdio->func);
err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
if (err)
dev_err(dev->dev, "sdio write failed: %d\n", err);
static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_sdio *sdio = &dev->sdio;
u8 pad;
- qid = mcu ? ARRAY_SIZE(sdio->xmit_buf) - 1 : q->qid;
while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
struct sk_buff *iter;
}
pad = roundup(e->skb->len, 4) - e->skb->len;
- if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
+ if (len + e->skb->len + pad + 4 > dev->sdio.xmit_buf_sz)
break;
if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
&ple_sz))
break;
- memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
- skb_headlen(e->skb));
+ memcpy(sdio->xmit_buf + len, e->skb->data, skb_headlen(e->skb));
len += skb_headlen(e->skb);
nframes++;
skb_walk_frags(e->skb, iter) {
- memcpy(sdio->xmit_buf[qid] + len, iter->data,
- iter->len);
+ memcpy(sdio->xmit_buf + len, iter->data, iter->len);
len += iter->len;
nframes++;
}
if (unlikely(pad)) {
- memset(sdio->xmit_buf[qid] + len, 0, pad);
+ memset(sdio->xmit_buf + len, 0, pad);
len += pad;
}
next:
}
if (nframes) {
- memset(sdio->xmit_buf[qid] + len, 0, 4);
- err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ memset(sdio->xmit_buf + len, 0, 4);
+ err = __mt76s_xmit_queue(dev, sdio->xmit_buf, len + 4);
if (err)
return err;
}
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
do {
nframes = 0;
} while (nframes > 0);
/* enable interrupt */
+ sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
}
test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
mt76_worker_schedule(&sdio->txrx_worker);
}
EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
int nbytes;
u8 rsp;
- if (sz <= DATA_PKT_SZ)
- nbytes = sz;
- else
- nbytes = DATA_PKT_SZ;
+ nbytes = min_t(u32, sz, DATA_PKT_SZ);
/*
* Data Response header
goto failed;
local->sram = ioremap(link->resource[2]->start,
resource_size(link->resource[2]));
+ if (!local->sram)
+ goto failed;
/*** Set up 16k window for shared memory (receive buffer) ***************/
link->resource[3]->flags |=
goto failed;
local->rmem = ioremap(link->resource[3]->start,
resource_size(link->resource[3]));
+ if (!local->rmem)
+ goto failed;
/*** Set up window for attribute memory ***********************************/
link->resource[4]->flags |=
goto failed;
local->amem = ioremap(link->resource[4]->start,
resource_size(link->resource[4]));
+ if (!local->amem)
+ goto failed;
dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram);
dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem);
u32 ul_command;
u32 ul_content;
- u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_encalgo;
u8 entry_i;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
-static void rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
- struct sk_buff_head *list,
- struct rtw_vif *rtwvif)
+static int rtw_append_probe_req_ie(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct sk_buff_head *list, u8 *bands,
+ struct rtw_vif *rtwvif)
{
struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
struct rtw_chip_info *chip = rtwdev->chip;
if (!(BIT(idx) & chip->band))
continue;
new = skb_copy(skb, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
skb_put_data(new, ies->ies[idx], ies->len[idx]);
skb_put_data(new, ies->common_ies, ies->common_ie_len);
skb_queue_tail(list, new);
+ (*bands)++;
}
+
+ return 0;
}
-static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_ssids,
+static int _rtw_hw_scan_update_probe_req(struct rtw_dev *rtwdev, u8 num_probes,
struct sk_buff_head *probe_req_list)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb, *tmp;
u8 page_offset = 1, *buf, page_size = chip->page_size;
- u8 pages = page_offset + num_ssids * RTW_PROBE_PG_CNT;
+ u8 pages = page_offset + num_probes * RTW_PROBE_PG_CNT;
u16 pg_addr = rtwdev->fifo.rsvd_h2c_info_addr, loc;
u16 buf_offset = page_size * page_offset;
u8 tx_desc_sz = chip->tx_pkt_desc_sz;
rtwdev->scan_info.probe_pg_size = page_offset;
out:
kfree(buf);
+ skb_queue_walk_safe(probe_req_list, skb, tmp)
+ kfree_skb(skb);
return ret;
}
{
struct cfg80211_scan_request *req = rtwvif->scan_req;
struct sk_buff_head list;
- struct sk_buff *skb;
- u8 num = req->n_ssids, i;
+ struct sk_buff *skb, *tmp;
+ u8 num = req->n_ssids, i, bands = 0;
+ int ret;
skb_queue_head_init(&list);
for (i = 0; i < num; i++) {
req->ssids[i].ssid,
req->ssids[i].ssid_len,
req->ie_len);
- rtw_append_probe_req_ie(rtwdev, skb, &list, rtwvif);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = rtw_append_probe_req_ie(rtwdev, skb, &list, &bands,
+ rtwvif);
+ if (ret)
+ goto out;
+
kfree_skb(skb);
}
- return _rtw_hw_scan_update_probe_req(rtwdev, num, &list);
+ return _rtw_hw_scan_update_probe_req(rtwdev, num * bands, &list);
+
+out:
+ skb_queue_walk_safe(&list, skb, tmp)
+ kfree_skb(skb);
+
+ return ret;
}
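The new page accounting is easiest to see with numbers (a symbolic example, not taken from the source):

	/* With req->n_ssids = 1 and a chip supporting 2 bands,
	 * rtw_append_probe_req_ie() queues one probe template per band and
	 * bumps *bands to 2, so num_probes = 1 * 2 = 2 and the reserved-page
	 * budget becomes page_offset + 2 * RTW_PROBE_PG_CNT, where the old
	 * code reserved room for a single n_ssids = 1 template only.
	 */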
static int rtw_add_chan_info(struct rtw_dev *rtwdev, struct rtw_chan_info *info,
rtwdev->hal.rcr |= BIT_CBSSID_BCN;
rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, true);
ieee80211_wake_queues(rtwdev->hw);
ieee80211_scan_completed(rtwdev->hw, &info);
struct rtw_dev *rtwdev = hw->priv;
int ret = 0;
+	/* let any previous IPS work finish to ensure we don't leave IPS twice */
+ cancel_work_sync(&rtwdev->ips_work);
+
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
struct rtw_dev *rtwdev = hw->priv;
mutex_lock(&rtwdev->mutex);
- rtw_core_scan_complete(rtwdev, vif);
+ rtw_core_scan_complete(rtwdev, vif, false);
mutex_unlock(&rtwdev->mutex);
}
}
}
+static void rtw_ips_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, ips_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_enter_ips(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
{
unsigned long mac_id;
#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
RA_MASK_VHT_RATES_2SS | \
RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_BG 0x00005
#define RA_MASK_CCK_IN_HT 0x00005
#define RA_MASK_CCK_IN_VHT 0x00005
#define RA_MASK_OFDM_IN_VHT 0x00010
#define RA_MASK_OFDM_IN_HT_2G 0x00010
#define RA_MASK_OFDM_IN_HT_5G 0x00030
-static u64 rtw_update_rate_mask(struct rtw_dev *rtwdev,
- struct rtw_sta_info *si,
- u64 ra_mask, bool is_vht_enable,
- u8 wireless_set)
+static u64 rtw_rate_mask_rssi(struct rtw_sta_info *si, u8 wireless_set)
+{
+ u8 rssi_level = si->rssi_level;
+
+ if (wireless_set == WIRELESS_CCK)
+ return 0xffffffffffffffffULL;
+
+ if (rssi_level == 0)
+ return 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ return 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ return 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ return 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ return 0xffffffffffff8f80ULL;
+ else
+ return 0xffffffffffff0f00ULL;
+}
+
+static u64 rtw_rate_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
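rtw_rate_mask_recover() keeps rate adaptation alive when the RSSI filter clears too much; an illustrative trace using the level-5 mask from above (bit meanings assumed, mask value from the code):

	/* A station whose mask holds only low legacy rates:
	 *   ra_mask_bak = 0x15                (CCK 1M/5.5M + OFDM 6M, assumed)
	 *   ra_mask     = 0x15 & 0xffffffffffff0f00 = 0
	 *
	 * The first test adds nothing (the backup has no HT/VHT bits), and the
	 * second restores the legacy bits from ra_mask_bak, so the firmware
	 * never sees an empty rate mask.
	 */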
+static u64 rtw_rate_mask_cfg(struct rtw_dev *rtwdev, struct rtw_sta_info *si,
+ u64 ra_mask, bool is_vht_enable)
{
struct rtw_hal *hal = &rtwdev->hal;
const struct cfg80211_bitrate_mask *mask = si->mask;
u64 cfg_mask = GENMASK_ULL(63, 0);
- u8 rssi_level, band;
-
- if (wireless_set != WIRELESS_CCK) {
- rssi_level = si->rssi_level;
- if (rssi_level == 0)
- ra_mask &= 0xffffffffffffffffULL;
- else if (rssi_level == 1)
- ra_mask &= 0xfffffffffffffff0ULL;
- else if (rssi_level == 2)
- ra_mask &= 0xffffffffffffefe0ULL;
- else if (rssi_level == 3)
- ra_mask &= 0xffffffffffffcfc0ULL;
- else if (rssi_level == 4)
- ra_mask &= 0xffffffffffff8f80ULL;
- else if (rssi_level >= 5)
- ra_mask &= 0xffffffffffff0f00ULL;
- }
+ u8 band;
if (!si->use_cfg_mask)
return ra_mask;
u8 ldpc_en = 0;
u8 tx_num = 1;
u64 ra_mask = 0;
+ u64 ra_mask_bak = 0;
bool is_vht_enable = false;
bool is_support_sgi = false;
if (hal->current_band_type == RTW_BAND_5G) {
ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
dm_info->rrsr_val_init = RRSR_INIT_5G;
} else if (hal->current_band_type == RTW_BAND_2G) {
ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ ra_mask_bak = ra_mask;
if (sta->vht_cap.vht_supported) {
ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
RA_MASK_OFDM_IN_VHT;
} else if (sta->supp_rates[0] <= 0xf) {
wireless_set = WIRELESS_CCK;
} else {
+ ra_mask &= RA_MASK_OFDM_RATES | RA_MASK_CCK_IN_BG;
wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
}
dm_info->rrsr_val_init = RRSR_INIT_2G;
} else {
rtw_err(rtwdev, "Unknown band type\n");
+ ra_mask_bak = ra_mask;
wireless_set = 0;
}
rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
- ra_mask = rtw_update_rate_mask(rtwdev, si, ra_mask, is_vht_enable,
- wireless_set);
+ ra_mask &= rtw_rate_mask_rssi(si, wireless_set);
+ ra_mask = rtw_rate_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask = rtw_rate_mask_cfg(rtwdev, si, ra_mask, is_vht_enable);
si->bw_mode = bw_mode;
si->stbc_en = stbc_en;
set_bit(RTW_FLAG_SCANNING, rtwdev->flags);
}
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan)
{
struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
u32 config = 0;
rtw_vif_port_config(rtwdev, rtwvif, config);
rtw_coex_scan_notify(rtwdev, COEX_SCAN_FINISH);
+
+ if (rtwvif->net_type == RTW_NET_NO_LINK && hw_scan)
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->ips_work);
}
int rtw_core_start(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_ccklock_work, rtw_coex_wl_ccklock_work);
INIT_WORK(&rtwdev->tx_work, rtw_tx_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->ips_work, rtw_ips_work);
INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct ips_work;
struct work_struct fw_recovery_work;
/* used to protect txqs list */
void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
void rtw_core_scan_start(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
-void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw_core_scan_complete(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ bool hw_scan);
int rtw_core_start(struct rtw_dev *rtwdev);
void rtw_core_stop(struct rtw_dev *rtwdev);
int rtw_chip_info_setup(struct rtw_dev *rtwdev);
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
[4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [6] = RTW_DEF_RFE(8821c, 0, 0),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
config RTW89_PCI
tristate
+config RTW89_8852A
+ tristate
+
config RTW89_8852AE
tristate "Realtek 8852AE PCI wireless network adapter"
depends on PCI
select RTW89_CORE
select RTW89_PCI
+ select RTW89_8852A
help
	  Selecting this option enables support for the 8852AE chipset
mac.o \
phy.o \
fw.o \
- rtw8852a.o \
- rtw8852a_table.o \
- rtw8852a_rfk.o \
- rtw8852a_rfk_table.o \
cam.o \
efuse.o \
regd.o \
ps.o \
ser.o
+obj-$(CONFIG_RTW89_8852A) += rtw89_8852a.o
+rtw89_8852a-objs := rtw8852a.o \
+ rtw8852a_table.o \
+ rtw8852a_rfk.o \
+ rtw8852a_rfk_table.o
+
+obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
+rtw89_8852ae-objs := rtw8852ae.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
sec_cam = addr_cam->sec_entries[key_idx];
if (!sec_cam)
return -EINVAL;
rtw89_cam_deinit(rtwdev, rtwvif);
}
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ addr_cam->valid = false;
+ clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
+}
+
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
- addr_cam->valid = false;
+ rtw89_cam_deinit_addr_cam(rtwdev, addr_cam);
bssid_cam->valid = false;
- clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
}
return 0;
}
-static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif)
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam)
{
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
u8 addr_cam_idx;
int i;
int ret;
addr_cam->valid = true;
addr_cam->addr_mask = 0;
addr_cam->mask_sel = RTW89_NO_MSK;
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
- ether_addr_copy(addr_cam->sma, rtwvif->mac_addr);
for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
addr_cam->sec_ent_keyid[i] = 0;
addr_cam->sec_ent[i] = 0;
}
+ /* associate addr cam with bssid cam */
+ addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
+
return 0;
}
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
int ret;
- ret = rtw89_cam_init_addr_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
if (ret) {
- rtw89_err(rtwdev, "failed to init addr cam\n");
+ rtw89_err(rtwdev, "failed to init bssid cam\n");
return ret;
}
- ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ ret = rtw89_cam_init_addr_cam(rtwdev, addr_cam, bssid_cam);
if (ret) {
- rtw89_err(rtwdev, "failed to init bssid cam\n");
+ rtw89_err(rtwdev, "failed to init addr cam\n");
return ret;
}
- /* associate addr cam with bssid cam */
- addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
-
return 0;
}
u8 *cmd)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta);
const u8 *sma = scan_mac_addr ? scan_mac_addr : rtwvif->mac_addr;
u8 sma_hash, tma_hash, addr_msk_start;
int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam,
+ const struct rtw89_bssid_cam_entry *bssid_cam);
+void rtw89_cam_deinit_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_addr_cam_entry *addr_cam);
void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
struct rtw89_vif *vif,
struct rtw89_sta *rtwsta,
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++)
+ for (i = 0; i < RTW89_PORT_NUM; i++)
memset(wl_linfo[i].rssi_state, 0,
sizeof(wl_linfo[i].rssi_state));
wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
en = true;
/* get p2p channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].role ==
RTW89_WIFI_ROLE_P2P_GO ||
wl_rinfo->active_role[i].role ==
} else {
en = true;
/* get 2g channel */
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
if (wl_rinfo->active_role[i].connected &&
wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
ch = wl_rinfo->active_role[i].ch;
memset(wl_rinfo, 0, sizeof(*wl_rinfo));
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
/* skip roles that are not active */
if (!wl_linfo[i].active)
continue;
rtwdev->is_bt_iqk_timeout = true;
}
}
+EXPORT_SYMBOL(rtw89_btc_ntfy_wl_rfk);
struct rtw89_btc_wl_sta_iter_data {
struct rtw89_dev *rtwdev;
wl_dinfo->real_band[RTW89_PHY_1]);
}
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
plink = &btc->cx.wl.link_info[i];
if (!plink->active)
#include <linux/ip.h>
#include <linux/udp.h>
+#include "cam.h"
#include "coex.h"
#include "core.h"
#include "efuse.h"
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
+#define RTW89_DEF_CHAN(_freq, _hw_val, _flags, _band) \
+ { .center_freq = _freq, .hw_value = _hw_val, .flags = _flags, .band = _band, }
+#define RTW89_DEF_CHAN_2G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_2GHZ)
+#define RTW89_DEF_CHAN_5G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_5G_NO_HT40MINUS(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, IEEE80211_CHAN_NO_HT40MINUS, NL80211_BAND_5GHZ)
+#define RTW89_DEF_CHAN_6G(_freq, _hw_val) \
+ RTW89_DEF_CHAN(_freq, _hw_val, 0, NL80211_BAND_6GHZ)
+
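For reference, these helpers expand to ordinary struct ieee80211_channel
initializers, for example:

/* RTW89_DEF_CHAN_2G(2412, 1) expands to: */
{ .center_freq = 2412, .hw_value = 1, .flags = 0, .band = NL80211_BAND_2GHZ, },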
static struct ieee80211_channel rtw89_channels_2ghz[] = {
- { .center_freq = 2412, .hw_value = 1, },
- { .center_freq = 2417, .hw_value = 2, },
- { .center_freq = 2422, .hw_value = 3, },
- { .center_freq = 2427, .hw_value = 4, },
- { .center_freq = 2432, .hw_value = 5, },
- { .center_freq = 2437, .hw_value = 6, },
- { .center_freq = 2442, .hw_value = 7, },
- { .center_freq = 2447, .hw_value = 8, },
- { .center_freq = 2452, .hw_value = 9, },
- { .center_freq = 2457, .hw_value = 10, },
- { .center_freq = 2462, .hw_value = 11, },
- { .center_freq = 2467, .hw_value = 12, },
- { .center_freq = 2472, .hw_value = 13, },
- { .center_freq = 2484, .hw_value = 14, },
+ RTW89_DEF_CHAN_2G(2412, 1),
+ RTW89_DEF_CHAN_2G(2417, 2),
+ RTW89_DEF_CHAN_2G(2422, 3),
+ RTW89_DEF_CHAN_2G(2427, 4),
+ RTW89_DEF_CHAN_2G(2432, 5),
+ RTW89_DEF_CHAN_2G(2437, 6),
+ RTW89_DEF_CHAN_2G(2442, 7),
+ RTW89_DEF_CHAN_2G(2447, 8),
+ RTW89_DEF_CHAN_2G(2452, 9),
+ RTW89_DEF_CHAN_2G(2457, 10),
+ RTW89_DEF_CHAN_2G(2462, 11),
+ RTW89_DEF_CHAN_2G(2467, 12),
+ RTW89_DEF_CHAN_2G(2472, 13),
+ RTW89_DEF_CHAN_2G(2484, 14),
};
static struct ieee80211_channel rtw89_channels_5ghz[] = {
- {.center_freq = 5180, .hw_value = 36,},
- {.center_freq = 5200, .hw_value = 40,},
- {.center_freq = 5220, .hw_value = 44,},
- {.center_freq = 5240, .hw_value = 48,},
- {.center_freq = 5260, .hw_value = 52,},
- {.center_freq = 5280, .hw_value = 56,},
- {.center_freq = 5300, .hw_value = 60,},
- {.center_freq = 5320, .hw_value = 64,},
- {.center_freq = 5500, .hw_value = 100,},
- {.center_freq = 5520, .hw_value = 104,},
- {.center_freq = 5540, .hw_value = 108,},
- {.center_freq = 5560, .hw_value = 112,},
- {.center_freq = 5580, .hw_value = 116,},
- {.center_freq = 5600, .hw_value = 120,},
- {.center_freq = 5620, .hw_value = 124,},
- {.center_freq = 5640, .hw_value = 128,},
- {.center_freq = 5660, .hw_value = 132,},
- {.center_freq = 5680, .hw_value = 136,},
- {.center_freq = 5700, .hw_value = 140,},
- {.center_freq = 5720, .hw_value = 144,},
- {.center_freq = 5745, .hw_value = 149,},
- {.center_freq = 5765, .hw_value = 153,},
- {.center_freq = 5785, .hw_value = 157,},
- {.center_freq = 5805, .hw_value = 161,},
- {.center_freq = 5825, .hw_value = 165,
- .flags = IEEE80211_CHAN_NO_HT40MINUS},
+ RTW89_DEF_CHAN_5G(5180, 36),
+ RTW89_DEF_CHAN_5G(5200, 40),
+ RTW89_DEF_CHAN_5G(5220, 44),
+ RTW89_DEF_CHAN_5G(5240, 48),
+ RTW89_DEF_CHAN_5G(5260, 52),
+ RTW89_DEF_CHAN_5G(5280, 56),
+ RTW89_DEF_CHAN_5G(5300, 60),
+ RTW89_DEF_CHAN_5G(5320, 64),
+ RTW89_DEF_CHAN_5G(5500, 100),
+ RTW89_DEF_CHAN_5G(5520, 104),
+ RTW89_DEF_CHAN_5G(5540, 108),
+ RTW89_DEF_CHAN_5G(5560, 112),
+ RTW89_DEF_CHAN_5G(5580, 116),
+ RTW89_DEF_CHAN_5G(5600, 120),
+ RTW89_DEF_CHAN_5G(5620, 124),
+ RTW89_DEF_CHAN_5G(5640, 128),
+ RTW89_DEF_CHAN_5G(5660, 132),
+ RTW89_DEF_CHAN_5G(5680, 136),
+ RTW89_DEF_CHAN_5G(5700, 140),
+ RTW89_DEF_CHAN_5G(5720, 144),
+ RTW89_DEF_CHAN_5G(5745, 149),
+ RTW89_DEF_CHAN_5G(5765, 153),
+ RTW89_DEF_CHAN_5G(5785, 157),
+ RTW89_DEF_CHAN_5G(5805, 161),
+ RTW89_DEF_CHAN_5G_NO_HT40MINUS(5825, 165),
+};
+
+static struct ieee80211_channel rtw89_channels_6ghz[] = {
+ RTW89_DEF_CHAN_6G(5955, 1),
+ RTW89_DEF_CHAN_6G(5975, 5),
+ RTW89_DEF_CHAN_6G(5995, 9),
+ RTW89_DEF_CHAN_6G(6015, 13),
+ RTW89_DEF_CHAN_6G(6035, 17),
+ RTW89_DEF_CHAN_6G(6055, 21),
+ RTW89_DEF_CHAN_6G(6075, 25),
+ RTW89_DEF_CHAN_6G(6095, 29),
+ RTW89_DEF_CHAN_6G(6115, 33),
+ RTW89_DEF_CHAN_6G(6135, 37),
+ RTW89_DEF_CHAN_6G(6155, 41),
+ RTW89_DEF_CHAN_6G(6175, 45),
+ RTW89_DEF_CHAN_6G(6195, 49),
+ RTW89_DEF_CHAN_6G(6215, 53),
+ RTW89_DEF_CHAN_6G(6235, 57),
+ RTW89_DEF_CHAN_6G(6255, 61),
+ RTW89_DEF_CHAN_6G(6275, 65),
+ RTW89_DEF_CHAN_6G(6295, 69),
+ RTW89_DEF_CHAN_6G(6315, 73),
+ RTW89_DEF_CHAN_6G(6335, 77),
+ RTW89_DEF_CHAN_6G(6355, 81),
+ RTW89_DEF_CHAN_6G(6375, 85),
+ RTW89_DEF_CHAN_6G(6395, 89),
+ RTW89_DEF_CHAN_6G(6415, 93),
+ RTW89_DEF_CHAN_6G(6435, 97),
+ RTW89_DEF_CHAN_6G(6455, 101),
+ RTW89_DEF_CHAN_6G(6475, 105),
+ RTW89_DEF_CHAN_6G(6495, 109),
+ RTW89_DEF_CHAN_6G(6515, 113),
+ RTW89_DEF_CHAN_6G(6535, 117),
+ RTW89_DEF_CHAN_6G(6555, 121),
+ RTW89_DEF_CHAN_6G(6575, 125),
+ RTW89_DEF_CHAN_6G(6595, 129),
+ RTW89_DEF_CHAN_6G(6615, 133),
+ RTW89_DEF_CHAN_6G(6635, 137),
+ RTW89_DEF_CHAN_6G(6655, 141),
+ RTW89_DEF_CHAN_6G(6675, 145),
+ RTW89_DEF_CHAN_6G(6695, 149),
+ RTW89_DEF_CHAN_6G(6715, 153),
+ RTW89_DEF_CHAN_6G(6735, 157),
+ RTW89_DEF_CHAN_6G(6755, 161),
+ RTW89_DEF_CHAN_6G(6775, 165),
+ RTW89_DEF_CHAN_6G(6795, 169),
+ RTW89_DEF_CHAN_6G(6815, 173),
+ RTW89_DEF_CHAN_6G(6835, 177),
+ RTW89_DEF_CHAN_6G(6855, 181),
+ RTW89_DEF_CHAN_6G(6875, 185),
+ RTW89_DEF_CHAN_6G(6895, 189),
+ RTW89_DEF_CHAN_6G(6915, 193),
+ RTW89_DEF_CHAN_6G(6935, 197),
+ RTW89_DEF_CHAN_6G(6955, 201),
+ RTW89_DEF_CHAN_6G(6975, 205),
+ RTW89_DEF_CHAN_6G(6995, 209),
+ RTW89_DEF_CHAN_6G(7015, 213),
+ RTW89_DEF_CHAN_6G(7035, 217),
+ RTW89_DEF_CHAN_6G(7055, 221),
+ RTW89_DEF_CHAN_6G(7075, 225),
+ RTW89_DEF_CHAN_6G(7095, 229),
+ RTW89_DEF_CHAN_6G(7115, 233),
};
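The 6 GHz entries follow the IEEE 802.11 channelization, where the center
frequency is 5950 MHz plus five times the channel number. A hypothetical
helper (not part of the driver) makes the mapping explicit:

/* hypothetical helper, for illustration only */
static inline u16 rtw89_6g_chan_to_freq(u8 chan)
{
	return 5950 + 5 * chan;	/* e.g. channel 1 -> 5955 MHz */
}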
static struct ieee80211_rate rtw89_bitrates[] = {
.vht_cap = {0},
};
+static struct ieee80211_supported_band rtw89_sband_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+ .channels = rtw89_channels_6ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_6ghz),
+
+ /* 6G has no CCK rates; skip 1M/2M/5.5M/11M */
+ .bitrates = rtw89_bitrates + 4,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
+};
+
static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
struct rtw89_traffic_stats *stats,
struct sk_buff *skb, bool tx)
u8 center_chan;
u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
u8 primary_chan_idx = 0;
+ u8 band;
+ u8 subband;
center_chan = channel->hw_value;
primary_freq = channel->center_freq;
break;
}
+ switch (channel->band) {
+ default:
+ case NL80211_BAND_2GHZ:
+ band = RTW89_BAND_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ band = RTW89_BAND_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ band = RTW89_BAND_6G;
+ break;
+ }
+
+ switch (center_chan) {
+ default:
+ case 1 ... 14:
+ subband = RTW89_CH_2G;
+ break;
+ case 36 ... 64:
+ subband = RTW89_CH_5G_BAND_1;
+ break;
+ case 100 ... 144:
+ subband = RTW89_CH_5G_BAND_3;
+ break;
+ case 149 ... 177:
+ subband = RTW89_CH_5G_BAND_4;
+ break;
+ }
+
chan_param->center_chan = center_chan;
chan_param->primary_chan = channel->hw_value;
chan_param->bandwidth = bandwidth;
chan_param->pri_ch_idx = primary_chan_idx;
+ chan_param->band_type = band;
+ chan_param->subband_type = subband;
}
void rtw89_set_channel(struct rtw89_dev *rtwdev)
struct rtw89_channel_params ch_param;
struct rtw89_channel_help_params bak;
u8 center_chan, bandwidth;
- u8 band_type;
bool band_changed;
rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
center_chan = ch_param.center_chan;
bandwidth = ch_param.bandwidth;
- band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
- band_changed = hal->current_band_type != band_type ||
+ band_changed = hal->current_band_type != ch_param.band_type ||
hal->current_channel == 0;
hal->current_band_width = bandwidth;
hal->current_channel = center_chan;
hal->prev_primary_channel = hal->current_primary_channel;
hal->current_primary_channel = ch_param.primary_chan;
- hal->current_band_type = band_type;
-
- switch (center_chan) {
- case 1 ... 14:
- hal->current_subband = RTW89_CH_2G;
- break;
- case 36 ... 64:
- hal->current_subband = RTW89_CH_5G_BAND_1;
- break;
- case 100 ... 144:
- hal->current_subband = RTW89_CH_5G_BAND_3;
- break;
- case 149 ... 177:
- hal->current_subband = RTW89_CH_5G_BAND_4;
- break;
- }
+ hal->current_band_type = ch_param.band_type;
+ hal->current_subband = ch_param.subband_type;
rtw89_chip_set_channel_prepare(rtwdev, &bak);
struct rtw89_core_tx_request *tx_req)
{
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
struct rtw89_vif *rtwvif;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_addr_cam_entry *addr_cam;
struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
}
rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = &rtwvif->addr_cam;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
u8 qsel, ch_dma;
- qsel = RTW89_TX_QSEL_B0_MGMT;
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : RTW89_TX_QSEL_B0_MGMT;
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
- desc_info->qsel = RTW89_TX_QSEL_B0_MGMT;
+ desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
+ desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
/* fixed data rate for mgmt frames */
desc_info->en_wd_info = true;
desc_info->bk = true;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
- qsel = rtw89_core_get_qsel(rtwdev, tid);
+ qsel = desc_info->hiq ? RTW89_TX_QSEL_B0_HI : rtw89_core_get_qsel(rtwdev, tid);
ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
desc_info->ch_dma = ch_dma;
desc_info->tid_indicate = tid_indicate;
desc_info->qsel = qsel;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
+ desc_info->port = desc_info->hiq ? rtwvif->port : 0;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
{
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (void *)skb->data;
enum rtw89_core_tx_type tx_type;
enum btc_pkt_type pkt_type;
desc_info->pkt_size = skb->len;
desc_info->is_bmc = is_bmc;
desc_info->wd_page = true;
+ desc_info->hiq = info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM;
switch (tx_req->tx_type) {
case RTW89_CORE_TX_TYPE_MGMT:
FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
- FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_SEL, desc_info->hw_ssn_sel) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HW_SSN_MODE, desc_info->hw_seq_mode);
return cpu_to_le32(dword);
}
{
u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
- FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size);
+ FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size) |
+ FIELD_PREP(RTW89_TXWD_BODY2_MACID, desc_info->mac_id);
return cpu_to_le32(dword);
}
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
- FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
}
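Each of these builders ORs FIELD_PREP() results into a single descriptor
dword; FIELD_PREP() shifts a value into the bit position described by its
mask. A minimal illustration (the masks here are examples, not the real
field layout):

u32 dword = FIELD_PREP(GENMASK(7, 0), 0x2a) |	/* bits 7:0  <- 0x2a */
	    FIELD_PREP(GENMASK(15, 8), 0x01);	/* bits 15:8 <- 0x01 */
/* dword == 0x0000012a */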
rx_status->rate_idx -= 4;
}
+static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb_ppdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ rtw89_core_hw_to_sband_rate(rx_status);
+ rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+}
+
static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct rtw89_rx_desc_info *desc_info,
if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
rtw89_correct_cck_chan(rtwdev, rx_status);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, phy_ppdu, desc_info, skb_ppdu, rx_status);
}
}
skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb_ppdu, rx_status);
}
}
memset(rx_status, 0, sizeof(*rx_status));
rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
if (desc_info->long_rxdesc &&
- BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
+ BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP)
skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
- } else {
- rtw89_core_hw_to_sband_rate(rx_status);
- rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
- ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
- rtwdev->napi_budget_countdown--;
- }
+ else
+ rtw89_core_rx_to_mac80211(rtwdev, NULL, desc_info, skb, rx_status);
}
EXPORT_SYMBOL(rtw89_core_rx);
bitmap_zero(addr, nbits);
}
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ u8 idx;
+
+ idx = rtw89_core_acquire_bit_map(rtwsta->ba_cam_map, RTW89_BA_CAM_NUM);
+ if (idx == RTW89_BA_CAM_NUM) {
+ /* Only tid 0 gets a static BA CAM entry, so when the BA CAM is
+ * full, replace the existing entry. Hardware keeps handling the
+ * original tid automatically.
+ */
+ if (tid != 0)
+ return -ENOSPC;
+
+ idx = 0;
+ }
+
+ entry = &rtwsta->ba_cam_entry[idx];
+ entry->tid = tid;
+ *cam_idx = idx;
+
+ return 0;
+}
+
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx)
+{
+ struct rtw89_ba_cam_entry *entry;
+ int i;
+
+ for (i = 0; i < RTW89_BA_CAM_NUM; i++) {
+ if (!test_bit(i, rtwsta->ba_cam_map))
+ continue;
+
+ entry = &rtwsta->ba_cam_entry[i];
+ if (entry->tid != tid)
+ continue;
+
+ rtw89_core_release_bit_map(rtwsta->ba_cam_map, i);
+ *cam_idx = i;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
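A minimal usage sketch, assuming the caller context of the BA CAM H2C path
shown later in this series: a static entry is acquired on AMPDU RX start and
released on RX stop, and the returned index is written into the command.

/* sketch only; rtwsta, tid, valid and cmd come from the caller */
u8 entry_idx;
int ret;

ret = valid ? rtw89_core_acquire_sta_ba_entry(rtwsta, tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwsta, tid, &entry_idx);
if (!ret)
	SET_BA_CAM_ENTRY_IDX(cmd, entry_idx);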
#define RTW89_TYPE_MAPPING(_type) \
case NL80211_IFTYPE_ ## _type: \
rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
+ } else if (vif->type == NL80211_IFTYPE_AP) {
+ rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
+ RTW89_MAX_MAC_ID_NUM);
}
return 0;
rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
rtw89_mac_bf_disassoc(rtwdev, vif, sta);
rtw89_core_free_sta_pending_ba(rtwdev, sta);
+ if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
- rtw89_vif_type_mapping(vif, false);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ rtw89_vif_type_mapping(vif, false);
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, true);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
}
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
/* update cam aid mac_id net_type */
- rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c cam\n");
return ret;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int ret;
- rtw89_vif_type_mapping(vif, true);
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, &rtwsta->addr_cam, &rtwvif->bssid_cam);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c init addr cam\n");
+ return ret;
+ }
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
return ret;
}
- ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, rtwsta, false);
if (ret) {
rtw89_warn(rtwdev, "failed to send h2c join info\n");
return ret;
if (vif->type == NL80211_IFTYPE_STATION)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
+ else if (vif->type == NL80211_IFTYPE_AP)
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
return 0;
}
{
struct ieee80211_hw *hw = rtwdev->hw;
struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
+ struct ieee80211_supported_band *sband_6ghz = NULL;
u32 size = sizeof(struct ieee80211_supported_band);
+ u8 support_bands = rtwdev->chip->support_bands;
- sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
- if (!sband_2ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
- hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ if (support_bands & BIT(NL80211_BAND_2GHZ)) {
+ sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
+ if (!sband_2ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+ }
- sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
- if (!sband_5ghz)
- goto err;
- rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
- rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
- rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
- hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ if (support_bands & BIT(NL80211_BAND_5GHZ)) {
+ sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
+ if (!sband_5ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+ }
+
+ if (support_bands & BIT(NL80211_BAND_6GHZ)) {
+ sband_6ghz = kmemdup(&rtw89_sband_6ghz, size, GFP_KERNEL);
+ if (!sband_6ghz)
+ goto err;
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_6GHZ, sband_6ghz);
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = sband_6ghz;
+ }
return 0;
err:
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
if (sband_2ghz)
kfree(sband_2ghz->iftype_data);
if (sband_5ghz)
kfree(sband_5ghz->iftype_data);
+ if (sband_6ghz)
+ kfree(sband_6ghz->iftype_data);
kfree(sband_2ghz);
kfree(sband_5ghz);
+ kfree(sband_6ghz);
return -ENOMEM;
}
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+ if (hw->wiphy->bands[NL80211_BAND_6GHZ])
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]->iftype_data);
kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_6GHZ]);
hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_6GHZ] = NULL;
}
static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
}
+void rtw89_core_update_beacon_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev;
+ struct rtw89_vif *rtwvif = container_of(work, struct rtw89_vif,
+ update_beacon_work);
+
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ rtwdev = rtwvif->rtwdev;
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+ mutex_unlock(&rtwdev->mutex);
+}
+
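This work item exists because beacon updates may be requested from atomic
context; the .set_tim callback later in this series defers the rebuild with:

ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);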
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
rtwdev->hal.cv = cv;
}
+static void rtw89_core_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hal.support_cckpd =
+ !(rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv <= CHIP_CBV) &&
+ !(rtwdev->chip->chip_id == RTL8852B && rtwdev->hal.cv <= CHIP_CAV);
+}
+
static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
{
int ret;
if (ret)
return ret;
+ rtw89_core_setup_phycap(rtwdev);
+
rtw89_mac_pwr_off(rtwdev);
return 0;
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
struct rtw89_dev;
extern const struct ieee80211_ops rtw89_ops;
-extern const struct rtw89_chip_info rtw8852a_chip_info;
#define MASKBYTE0 0xff
#define MASKBYTE1 0xff00
#define MAX_RSSI 110
#define RSSI_FACTOR 1
#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
-#define RTW89_MAX_HW_PORT_NUM 5
#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
#define RTW89_HTC_VARIANT_HE 3
};
enum rtw89_upd_mode {
- RTW89_VIF_CREATE,
- RTW89_VIF_REMOVE,
- RTW89_VIF_TYPE_CHANGE,
- RTW89_VIF_INFO_CHANGE,
- RTW89_VIF_CON_DISCONN
+ RTW89_ROLE_CREATE,
+ RTW89_ROLE_REMOVE,
+ RTW89_ROLE_TYPE_CHANGE,
+ RTW89_ROLE_INFO_CHANGE,
+ RTW89_ROLE_CON_DISCONN
};
enum rtw89_self_role {
enum rtw89_band {
RTW89_BAND_2G = 0,
RTW89_BAND_5G = 1,
+ RTW89_BAND_6G = 2,
RTW89_BAND_MAX,
};
RTW89_REGD_NUM,
};
-extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
-extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
-
struct rtw89_txpwr_byrate {
s8 cck[RTW89_RATE_CCK_MAX];
s8 ofdm[RTW89_RATE_OFDM_MAX];
u8 primary_chan;
u8 bandwidth;
u8 pri_ch_idx;
+ u8 band_type;
+ u8 subband_type;
};
struct rtw89_channel_help_params {
struct rtw89_tx_desc_info {
u16 pkt_size;
u8 wp_offset;
+ u8 mac_id;
u8 qsel;
u8 ch_dma;
u8 hdr_llc_len;
bool fw_dl;
u16 seq;
bool a_ctrl_bsr;
+ u8 hw_ssn_sel;
+#define RTW89_MGMT_HW_SSN_SEL 1
+ u8 hw_seq_mode;
+#define RTW89_MGMT_HW_SEQ_MODE 1
+ bool hiq;
+ u8 port;
};
struct rtw89_core_tx_request {
u8 connect_cnt;
u8 link_mode;
union rtw89_btc_wl_role_info_map role_map;
- struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_active_role active_role[RTW89_PORT_NUM];
};
struct rtw89_btc_wl_ver_info {
};
struct rtw89_btc_wl_info {
- struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
DECLARE_EWMA(rssi, 10, 16);
-struct rtw89_sta {
- u8 mac_id;
- bool disassoc;
- struct rtw89_vif *rtwvif;
- struct rtw89_ra_info ra;
- struct rtw89_ra_report ra_report;
- int max_agg_wait;
- u8 prev_rssi;
- struct ewma_rssi avg_rssi;
- struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
- struct ieee80211_rx_status rx_status;
- u16 rx_hw_rate;
- __le32 htc_template;
-
- bool use_cfg_mask;
- struct cfg80211_bitrate_mask mask;
+#define RTW89_BA_CAM_NUM 2
- bool cctl_tx_time;
- u32 ampdu_max_time:4;
- bool cctl_tx_retry_limit;
- u32 data_tx_cnt_lmt:6;
+struct rtw89_ba_cam_entry {
+ u8 tid;
};
#define RTW89_MAX_ADDR_CAM_NUM 128
u8 wapi : 1;
u8 mask_sel : 2;
u8 bssid_cam_idx: 6;
- u8 sma[ETH_ALEN];
u8 sec_ent_mode;
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
u8 key[32];
};
+struct rtw89_sta {
+ u8 mac_id;
+ bool disassoc;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_ra_info ra;
+ struct rtw89_ra_report ra_report;
+ int max_agg_wait;
+ u8 prev_rssi;
+ struct ewma_rssi avg_rssi;
+ struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ struct ieee80211_rx_status rx_status;
+ u16 rx_hw_rate;
+ __le32 htc_template;
+ struct rtw89_addr_cam_entry addr_cam; /* AP mode only */
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask mask;
+
+ bool cctl_tx_time;
+ u32 ampdu_max_time:4;
+ bool cctl_tx_retry_limit;
+ u32 data_tx_cnt_lmt:6;
+
+ DECLARE_BITMAP(ba_cam_map, RTW89_BA_CAM_NUM);
+ struct rtw89_ba_cam_entry ba_cam_entry[RTW89_BA_CAM_NUM];
+};
+
struct rtw89_efuse {
bool valid;
u8 xtal_cap;
struct rtw89_vif {
struct list_head list;
+ struct rtw89_dev *rtwdev;
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
bool wowlan_magic;
bool is_hesta;
bool last_a_ctrl;
+ struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
const struct rtw89_hfc_param_ini *hfc_param_ini;
const struct rtw89_dle_mem *dle_mem;
u32 rf_base_addr[2];
+ u8 support_bands;
u8 rf_path_num;
u8 tx_nss;
u8 rx_nss;
u8 ps_mode_supported;
};
+struct rtw89_driver_info {
+ const struct rtw89_chip_info *chip;
+};
+
enum rtw89_hcifc_mode {
RTW89_HCIFC_POH = 0,
RTW89_HCIFC_STF = 1,
u32 antenna_rx;
u8 tx_nss;
u8 rx_nss;
+ bool support_cckpd;
};
#define RTW89_MAX_MAC_ID_NUM 128
struct rtw89_ser ser;
- DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM);
+ DECLARE_BITMAP(hw_port, RTW89_PORT_NUM);
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
return sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
}
+static inline
+struct rtw89_addr_cam_entry *rtw89_get_addr_cam_of(struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta)
+{
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE && rtwsta)
+ return &rtwsta->addr_cam;
+ return &rtwvif->addr_cam;
+}
+
static inline
void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
struct rtw89_channel_help_params *p)
u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+int rtw89_core_acquire_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
+int rtw89_core_release_sta_ba_entry(struct rtw89_sta *rtwsta, u8 tid, u8 *cam_idx);
void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
struct rtw89_traffic_stats *stats);
int rtw89_core_start(struct rtw89_dev *rtwdev);
void rtw89_core_stop(struct rtw89_dev *rtwdev);
+void rtw89_core_update_beacon_work(struct work_struct *work);
#endif
static const struct rtw89_rx_rate_cnt_info {
enum rtw89_hw_rate first_rate;
int len;
+ int ext;
const char *rate_mode;
} rtw89_rx_rate_cnt_infos[] = {
- {RTW89_HW_RATE_CCK1, 4, "Legacy:"},
- {RTW89_HW_RATE_OFDM6, 8, "OFDM:"},
- {RTW89_HW_RATE_MCS0, 8, "HT 0:"},
- {RTW89_HW_RATE_MCS8, 8, "HT 1:"},
- {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"},
- {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"},
- {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"},
- {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2ss:"},
+ {RTW89_HW_RATE_CCK1, 4, 0, "Legacy:"},
+ {RTW89_HW_RATE_OFDM6, 8, 0, "OFDM:"},
+ {RTW89_HW_RATE_MCS0, 8, 0, "HT 0:"},
+ {RTW89_HW_RATE_MCS8, 8, 0, "HT 1:"},
+ {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, 2, "VHT 1SS:"},
+ {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, 2, "VHT 2SS:"},
+ {RTW89_HW_RATE_HE_NSS1_MCS0, 12, 0, "HE 1SS:"},
+ {RTW89_HW_RATE_HE_NSS2_MCS0, 12, 0, "HE 2SS:"},
};
static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
seq_printf(m, "%10s [", info->rate_mode);
rtw89_debug_append_rx_rate(m, pkt_stat,
info->first_rate, info->len);
+ if (info->ext) {
+ seq_puts(m, "][");
+ rtw89_debug_append_rx_rate(m, pkt_stat,
+ info->first_rate + info->len, info->ext);
+ }
seq_puts(m, "]\n");
}
return 0;
}
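With the new ext member, a row prints its extra rate counters in a second
bracket; for the VHT rows (len 10, ext 2) this separates MCS 0-9 from the
two extended rates:

/* VHT 1SS row, len = 10, ext = 2:
 *   first bracket : first_rate + 0 .. first_rate + 9   (MCS 0-9)
 *   second bracket: first_rate + 10 .. first_rate + 11 (MCS 10-11)
 */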
+static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_addr_cam_entry *addr_cam)
+{
+ struct rtw89_sec_cam_entry *sec_entry;
+ int i;
+
+ seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
+ seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
+ seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
+ addr_cam->sec_cam_map);
+ for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
+ sec_entry = addr_cam->sec_entries[i];
+ if (!sec_entry)
+ continue;
+ seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
+ if (sec_entry->ext_key)
+ seq_printf(m, ", %u", sec_entry->sec_cam_idx + 1);
+ seq_puts(m, "\n");
+ }
+}
+
+static
+void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
+ seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
+ rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+}
+
+static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct seq_file *m = (struct seq_file *)data;
+
+ seq_printf(m, "STA [%d] %pM\n", rtwsta->mac_id, sta->addr);
+ rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+}
+
+static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+
+ seq_puts(m, "map:\n");
+ seq_printf(m, "\tmac_id: %*ph\n", (int)sizeof(rtwdev->mac_id_map),
+ rtwdev->mac_id_map);
+ seq_printf(m, "\taddr_cam: %*ph\n", (int)sizeof(cam_info->addr_cam_map),
+ cam_info->addr_cam_map);
+ seq_printf(m, "\tbssid_cam: %*ph\n", (int)sizeof(cam_info->bssid_cam_map),
+ cam_info->bssid_cam_map);
+ seq_printf(m, "\tsec_cam: %*ph\n", (int)sizeof(cam_info->sec_cam_map),
+ cam_info->sec_cam_map);
+
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
+ IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_ids_get_iter, m);
+
+ return 0;
+}
+
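The new entry is read like the other debugfs files; assuming the usual
mac80211 debugfs layout (the phy index depends on the system), e.g.

cat /sys/kernel/debug/ieee80211/phy0/rtw89/stations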
static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
.cb_read = rtw89_debug_priv_read_reg_get,
.cb_write = rtw89_debug_priv_read_reg_select,
.cb_read = rtw89_debug_priv_phy_info_get,
};
+static struct rtw89_debugfs_priv rtw89_debug_priv_stations = {
+ .cb_read = rtw89_debug_priv_stations_get,
+};
+
#define rtw89_debugfs_add(name, mode, fopname, parent) \
do { \
rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
rtw89_debugfs_add_w(btc_manual);
rtw89_debugfs_add_w(fw_log_manual);
rtw89_debugfs_add_r(phy_info);
+ rtw89_debugfs_add_r(stations);
}
#endif
return -EBUSY;
}
-#define H2C_BA_CAM_LEN 4
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params)
+#define H2C_BA_CAM_LEN 8
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params)
{
+ u8 macid = rtwsta->mac_id;
struct sk_buff *skb;
+ u8 entry_idx;
+ int ret;
+
+ ret = valid ?
+ rtw89_core_acquire_sta_ba_entry(rtwsta, params->tid, &entry_idx) :
+ rtw89_core_release_sta_ba_entry(rtwsta, params->tid, &entry_idx);
+ if (ret) {
+ /* This still works without a static BA CAM entry, because
+ * hardware can create a dynamic BA CAM automatically.
+ */
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to %s entry tid=%d for h2c ba cam\n",
+ valid ? "alloc" : "free", params->tid);
+ return 0;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
if (!skb) {
}
skb_put(skb, H2C_BA_CAM_LEN);
SET_BA_CAM_MACID(skb->data, macid);
+ SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
if (!valid)
goto end;
SET_BA_CAM_VALID(skb->data, valid);
else
SET_BA_CAM_BMAP_SIZE(skb->data, 0);
/* If init req is set, hw will set the ssn */
- SET_BA_CAM_INIT_REQ(skb->data, 0);
+ SET_BA_CAM_INIT_REQ(skb->data, 1);
SET_BA_CAM_SSN(skb->data, params->ssn);
end:
}
#define H2C_CMC_TBL_LEN 68
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct sk_buff *skb;
u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+ u8 macid = rtwvif->mac_id;
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
SET_CMC_TBL_ANTSEL_D(skb->data, 0);
SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
struct ieee80211_sta *sta)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct sk_buff *skb;
u8 pads[RTW89_PPE_BW_NUM];
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
memset(pads, 0, sizeof(pads));
- __get_sta_he_pkt_padding(rtwdev, sta, pads);
+ if (sta)
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
if (!skb) {
return -ENOMEM;
}
skb_put(skb, H2C_CMC_TBL_LEN);
- SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_MACID(skb->data, mac_id);
SET_CTRL_INFO_OPERATION(skb->data, 1);
SET_CMC_TBL_DISRTSFB(skb->data, 1);
SET_CMC_TBL_DISDATAFB(skb->data, 1);
SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
- SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (sta)
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ SET_CMC_TBL_DATA_DCM(skb->data, 0);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
return -EBUSY;
}
-#define H2C_VIF_MAINTAIN_LEN 4
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode)
+#define H2C_BCN_BASE_LEN 12
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct sk_buff *skb;
+ struct sk_buff *skb_beacon;
+ u16 tim_offset;
+ int bcn_total_len;
+
+ skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, NULL);
+ if (!skb_beacon) {
+ rtw89_err(rtwdev, "failed to get beacon skb\n");
+ return -ENOMEM;
+ }
+
+ bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(bcn_total_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for beacon update\n");
+ dev_kfree_skb_any(skb_beacon);
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BCN_BASE_LEN);
+
+ SET_BCN_UPD_PORT(skb->data, rtwvif->port);
+ SET_BCN_UPD_MBSSID(skb->data, 0);
+ SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
+ SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
+ SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
+ SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
+ SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
+ SET_BCN_UPD_RATE(skb->data, hal->current_band_type == RTW89_BAND_2G ?
+ RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6);
+
+ skb_put_data(skb, skb_beacon->data, skb_beacon->len);
+ dev_kfree_skb_any(skb_beacon);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_BCN_UPD, 0, 1,
+ bcn_total_len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ dev_kfree_skb_any(skb);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define H2C_ROLE_MAINTAIN_LEN 4
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
+ if (rtwsta)
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ else
+ self_role = rtwvif->self_role;
+ } else {
+ self_role = rtwvif->self_role;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_ROLE_MAINTAIN_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
return -ENOMEM;
}
- skb_put(skb, H2C_VIF_MAINTAIN_LEN);
- SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
- SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
+ skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
+ SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
+ SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
- H2C_VIF_MAINTAIN_LEN);
+ H2C_ROLE_MAINTAIN_LEN);
if (rtw89_h2c_tx(rtwdev, skb, false)) {
rtw89_err(rtwdev, "failed to send h2c\n");
#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn)
+ struct rtw89_sta *rtwsta, bool dis_conn)
{
struct sk_buff *skb;
+ u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
+ u8 self_role = rtwvif->self_role;
+ u8 net_type = rtwvif->net_type;
+
+ if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
+ self_role = RTW89_SELF_ROLE_AP_CLIENT;
+ net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
+ }
skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
if (!skb) {
return -ENOMEM;
}
skb_put(skb, H2C_JOIN_INFO_LEN);
- SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
+ SET_JOININFO_MACID(skb->data, mac_id);
SET_JOININFO_OP(skb->data, dis_conn);
SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
SET_JOININFO_WMM(skb->data, rtwvif->wmm);
SET_JOININFO_TF_MAC_PAD(skb->data, 0);
SET_JOININFO_DL_T_PE(skb->data, 0);
SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
- SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
+ SET_JOININFO_NET_TYPE(skb->data, net_type);
SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
- SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);
+ SET_JOININFO_SELF_ROLE(skb->data, self_role);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
return -EBUSY;
}
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
- for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
+ for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
GENMASK(31, 30));
}
+static inline void SET_BCN_UPD_PORT(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_MBSSID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
+}
+
+static inline void SET_BCN_UPD_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
+}
+
+static inline void SET_BCN_UPD_GRP_IE_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c, (val - 24) | BIT(7), GENMASK(31, 24));
+}
+
+static inline void SET_BCN_UPD_MACID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BCN_UPD_SSN_SEL(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(9, 8));
+}
+
+static inline void SET_BCN_UPD_SSN_MODE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(11, 10));
+}
+
+static inline void SET_BCN_UPD_RATE(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(20, 12));
+}
+
+static inline void SET_BCN_UPD_TXPWR(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(23, 21));
+}
+
+static inline void SET_BCN_UPD_TXINFO_CTRL_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(0));
+}
+
+static inline void SET_BCN_UPD_NTX_PATH_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(4, 1));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(6, 5));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(8, 7));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(10, 9));
+}
+
+static inline void SET_BCN_UPD_PATH_MAP_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(12, 11));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_A(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(13));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_B(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(14));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_C(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(15));
+}
+
+static inline void SET_BCN_UPD_PATH_ANTSEL_D(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, BIT(16));
+}
+
+static inline void SET_BCN_UPD_CSA_OFST(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 17));
+}
+
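All of these setters rely on le32p_replace_bits(), which read-modify-writes
only the masked field of a little-endian dword in the H2C payload; a minimal
illustration:

__le32 d = cpu_to_le32(0);

le32p_replace_bits(&d, 0x5, GENMASK(7, 0));	/* d == cpu_to_le32(0x00000005) */
le32p_replace_bits(&d, 0x1, GENMASK(15, 8));	/* d == cpu_to_le32(0x00000105) */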
static inline void SET_FWROLE_MAINTAIN_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20));
}
+static inline void SET_BA_CAM_UID(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(7, 0));
+}
+
+static inline void SET_BA_CAM_STD_EN(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(8));
+}
+
+static inline void SET_BA_CAM_BAND(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, BIT(9));
+}
+
+static inline void SET_BA_CAM_ENTRY_IDX_V1(void *h2c, u32 val)
+{
+ le32p_replace_bits((__le32 *)h2c + 1, val, GENMASK(31, 28));
+}
+
static inline void SET_LPS_PARM_MACID(void *h2c, u32 val)
{
le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0));
/* CLASS 5 - Frame Exchange */
#define H2C_CL_MAC_FR_EXCHG 0x5
#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
+#define H2C_FUNC_MAC_BCN_UPD 0x5
/* CLASS 6 - Address CAM */
#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
u8 type, u8 cat, u8 class, u8 func,
bool rack, bool dack, u32 len);
-int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif,
struct rtw89_sta *rtwsta, const u8 *scan_mac_addr);
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
void rtw89_fw_c2h_work(struct work_struct *work);
-int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- enum rtw89_upd_mode upd_mode);
+int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta,
+ enum rtw89_upd_mode upd_mode);
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
- u8 dis_conn);
+ struct rtw89_sta *rtwsta, bool dis_conn);
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
bool pause);
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
-int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
- struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool valid, struct ieee80211_ampdu_params *params);
+
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
struct rtw89_lps_parm *lps_param);
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
qempty.dle_type = DLE_CTRL_TYPE_PLE;
qempty.grpsel = 0;
+ qempty.qempty = ~(u32)0;
ret = dle_dfi_qempty(rtwdev, &qempty);
if (ret)
rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
}
EXPORT_SYMBOL(rtw89_mac_set_err_status);
-const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = {
+const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie = {
2, 40, 0, 0, 1, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_hfc_preccfg_pcie);
static int hfc_reset_param(struct rtw89_dev *rtwdev)
{
}
/* PCIE 64 */
-const struct rtw89_dle_size wde_size0 = {
+const struct rtw89_dle_size rtw89_wde_size0 = {
RTW89_WDE_PG_64, 4095, 1,
};
+EXPORT_SYMBOL(rtw89_wde_size0);
/* DLFW */
-const struct rtw89_dle_size wde_size4 = {
+const struct rtw89_dle_size rtw89_wde_size4 = {
RTW89_WDE_PG_64, 0, 4096,
};
+EXPORT_SYMBOL(rtw89_wde_size4);
/* PCIE */
-const struct rtw89_dle_size ple_size0 = {
+const struct rtw89_dle_size rtw89_ple_size0 = {
RTW89_PLE_PG_128, 1520, 16,
};
+EXPORT_SYMBOL(rtw89_ple_size0);
/* DLFW */
-const struct rtw89_dle_size ple_size4 = {
+const struct rtw89_dle_size rtw89_ple_size4 = {
RTW89_PLE_PG_128, 64, 1472,
};
+EXPORT_SYMBOL(rtw89_ple_size4);
/* PCIE 64 */
-const struct rtw89_wde_quota wde_qt0 = {
+const struct rtw89_wde_quota rtw89_wde_qt0 = {
3792, 196, 0, 107,
};
+EXPORT_SYMBOL(rtw89_wde_qt0);
/* DLFW */
-const struct rtw89_wde_quota wde_qt4 = {
+const struct rtw89_wde_quota rtw89_wde_qt4 = {
0, 0, 0, 0,
};
+EXPORT_SYMBOL(rtw89_wde_qt4);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt4 = {
+const struct rtw89_ple_quota rtw89_ple_qt4 = {
264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
};
+EXPORT_SYMBOL(rtw89_ple_qt4);
/* PCIE SCC */
-const struct rtw89_ple_quota ple_qt5 = {
+const struct rtw89_ple_quota rtw89_ple_qt5 = {
264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
};
+EXPORT_SYMBOL(rtw89_ple_qt5);
/* DLFW */
-const struct rtw89_ple_quota ple_qt13 = {
+const struct rtw89_ple_quota rtw89_ple_qt13 = {
0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
};
+EXPORT_SYMBOL(rtw89_ple_qt13);
static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
enum rtw89_qta_mode mode)
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_stop_sch_tx);
int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
{
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_resume_sch_tx);
static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
bool wd)
rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
}
-static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
{
u8 sh = FIELD_GET(GENMASK(4, 0), macid);
u8 grp = macid >> 5;
bcn_int);
}
+static void rtw89_mac_port_cfg_hiq_win(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u32 hiq_win_addr[RTW89_PORT_NUM] = {
+ R_AX_P0MB_HGQ_WINDOW_CFG_0, R_AX_PORT_HGQ_WINDOW_CFG,
+ R_AX_PORT_HGQ_WINDOW_CFG + 1, R_AX_PORT_HGQ_WINDOW_CFG + 2,
+ R_AX_PORT_HGQ_WINDOW_CFG + 3,
+ };
+ u8 win = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ? 16 : 0;
+ u8 port = rtwvif->port;
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_idx(hiq_win_addr[port], rtwvif->mac_idx);
+ rtw89_write8(rtwdev, reg, win);
+}
+
+static void rtw89_mac_port_cfg_hiq_dtim(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 addr;
+
+ addr = rtw89_mac_reg_by_idx(R_AX_MD_TSFT_STMP_CTL, rtwvif->mac_idx);
+ rtw89_write8_set(rtwdev, addr, B_AX_UPD_HGQMD | B_AX_UPD_TIMIE);
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->dtim_ctrl, B_AX_DTIM_NUM_MASK,
+ vif->bss_conf.dtim_period);
+}
+
static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif)
{
rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
- ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false);
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwvif->mac_id, false);
if (ret)
return ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_CREATE);
if (ret)
return ret;
if (ret)
return ret;
- ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id);
+ ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif);
if (ret)
return ret;
{
int ret;
- ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_REMOVE);
if (ret)
return ret;
rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_win(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_dtim(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
- rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
fsleep(BCN_ERLY_SET_DLY);
rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
}
+static void
+rtw89_mac_c2h_bcn_cnt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
static
void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
struct sk_buff *c2h, u32 len) = {
[RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
[RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
[RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
+ [RTW89_MAC_C2H_FUNC_BCN_CNT] = rtw89_mac_c2h_bcn_cnt,
};
void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
return false;
}
+EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
return ret;
}
+EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
{
return 0;
}
+EXPORT_SYMBOL(rtw89_mac_coex_init);
int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
{
u32 reg;
- u8 val;
+ u16 val;
int ret;
ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
(plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
(plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
- (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0);
- rtw89_write8(rtwdev, reg, val);
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0) |
+ B_AX_PLT_EN;
+ rtw89_write16(rtwdev, reg, val);
return 0;
}
RTW89_MAC_C2H_FUNC_REC_ACK,
RTW89_MAC_C2H_FUNC_DONE_ACK,
RTW89_MAC_C2H_FUNC_C2H_LOG,
+ RTW89_MAC_C2H_FUNC_BCN_CNT,
RTW89_MAC_C2H_FUNC_INFO_MAX,
};
MAC_AX_SET_ERR_MAX,
};
-extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie;
-extern const struct rtw89_dle_size wde_size0;
-extern const struct rtw89_dle_size wde_size4;
-extern const struct rtw89_dle_size ple_size0;
-extern const struct rtw89_dle_size ple_size4;
-extern const struct rtw89_wde_quota wde_qt0;
-extern const struct rtw89_wde_quota wde_qt4;
-extern const struct rtw89_ple_quota ple_qt4;
-extern const struct rtw89_ple_quota ple_qt5;
-extern const struct rtw89_ple_quota ple_qt13;
+extern const struct rtw89_hfc_prec_cfg rtw89_hfc_preccfg_pcie;
+extern const struct rtw89_dle_size rtw89_wde_size0;
+extern const struct rtw89_dle_size rtw89_wde_size4;
+extern const struct rtw89_dle_size rtw89_ple_size0;
+extern const struct rtw89_dle_size rtw89_ple_size4;
+extern const struct rtw89_wde_quota rtw89_wde_qt0;
+extern const struct rtw89_wde_quota rtw89_wde_qt4;
+extern const struct rtw89_ple_quota rtw89_ple_qt4;
+extern const struct rtw89_ple_quota rtw89_ple_qt5;
+extern const struct rtw89_ple_quota rtw89_ple_qt13;
static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
{
int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool en);
+int rtw89_mac_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause);
static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
{
int ret = 0;
mutex_lock(&rtwdev->mutex);
+ rtwvif->rtwdev = rtwdev;
list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work);
rtw89_leave_ps_mode(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
rtw89_vif_type_mapping(vif, false);
rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
- RTW89_MAX_HW_PORT_NUM);
- if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) {
+ RTW89_PORT_NUM);
+ if (rtwvif->port == RTW89_PORT_NUM) {
ret = -ENOSPC;
goto out;
}
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ cancel_work_sync(&rtwvif->update_beacon_work);
+
mutex_lock(&rtwdev->mutex);
rtw89_leave_ps_mode(rtwdev);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
rtw89_leave_ps_mode(rtwdev);
*new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
- FIF_BCN_PRBRESP_PROMISC;
+ FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ;
if (changed_flags & FIF_ALLMULTI) {
if (*new_flags & FIF_ALLMULTI)
rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
}
}
+ if (changed_flags & FIF_PROBE_REQ) {
+ if (*new_flags & FIF_PROBE_REQ) {
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_UC_CAM_MATCH;
+ } else {
+ rtwdev->hal.rx_fltr |= B_AX_A_BC_CAM_MATCH;
+ rtwdev->hal.rx_fltr |= B_AX_A_UC_CAM_MATCH;
+ }
+ }
rtw89_write32_mask(rtwdev,
rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
}
+
+ rtw89_vif_type_mapping(vif, true);
+
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
}
+ if (changed & BSS_CHANGED_BEACON)
+ rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
+
if (changed & BSS_CHANGED_ERP_SLOT)
rtw89_conf_tx(rtwdev, rtwvif);
mutex_unlock(&rtwdev->mutex);
}
+static int rtw89_ops_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ ether_addr_copy(rtwvif->bssid, vif->bss_conf.bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, NULL, RTW89_ROLE_TYPE_CHANGE);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif, NULL, NULL);
+ rtw89_chip_rfk_channel(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+void rtw89_ops_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, NULL);
+ rtw89_fw_h2c_join_info(rtwdev, rtwvif, NULL, true);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
+ bool set)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+
+ ieee80211_queue_work(rtwdev->hw, &rtwvif->update_beacon_work);
+
+ return 0;
+}
+
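mac80211 invokes set_tim in atomic context, so the handler above only queues work; the actual beacon refresh runs later in process context. A minimal sketch of the queued handler, assuming it matches the rtw89_core_update_beacon_work wired up in add_interface earlier in this patch:

/* Sketch only: runs in process context, so it may take the device
 * mutex and issue the H2C beacon update that set_tim itself cannot.
 */
static void rtw89_core_update_beacon_work(struct work_struct *work)
{
	struct rtw89_vif *rtwvif =
		container_of(work, struct rtw89_vif, update_beacon_work);
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;

	mutex_lock(&rtwdev->mutex);
	rtw89_fw_h2c_update_beacon(rtwdev, rtwvif);
	mutex_unlock(&rtwdev->mutex);
}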
static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 ac,
const struct ieee80211_tx_queue_params *params)
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mutex_lock(&rtwdev->mutex);
clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
- rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
rtwsta->ampdu_params[tid].agg_num = params->buf_size;
rtwsta->ampdu_params[tid].amsdu = params->amsdu;
rtw89_leave_ps_mode(rtwdev);
- rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params);
mutex_unlock(&rtwdev->mutex);
break;
case IEEE80211_AMPDU_RX_START:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, true, params);
+ mutex_unlock(&rtwdev->mutex);
+ break;
case IEEE80211_AMPDU_RX_STOP:
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_h2c_ba_cam(rtwdev, rtwsta, false, params);
+ mutex_unlock(&rtwdev->mutex);
break;
default:
WARN_ON(1);
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
.bss_info_changed = rtw89_ops_bss_info_changed,
+ .start_ap = rtw89_ops_start_ap,
+ .stop_ap = rtw89_ops_stop_ap,
+ .set_tim = rtw89_ops_set_tim,
.conf_tx = rtw89_ops_conf_tx,
.sta_state = rtw89_ops_sta_state,
.set_key = rtw89_ops_set_key,
return 0;
}
-static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 data)
{
- u16 write_addr;
- u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK);
- u8 flag;
- int ret;
-
- write_addr = addr & B_AX_DBI_ADDR_MSK;
- write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK);
- rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data);
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
- if (ret)
- WARN(flag, "failed to write to DBI register, addr=0x%04x\n",
- addr);
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_write_config_byte(pdev, addr, data);
}
-static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
+ u8 *value)
{
- u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
- u8 flag;
- int ret;
-
- rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
- rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
-
- ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
- 10 * RTW89_PCI_WR_RETRY_CNT, false,
- rtwdev, R_AX_DBI_FLAG + 2);
-
- if (!ret) {
- read_addr = R_AX_DBI_RDATA + (addr & 3);
- *value = rtw89_read8(rtwdev, read_addr);
- } else {
- WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
- ret = -EIO;
- }
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
- return ret;
+ return pci_read_config_byte(pdev, addr, value);
}
-static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_set(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value |= bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
-static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+static int rtw89_pci_config_byte_clr(struct rtw89_dev *rtwdev, u16 addr,
+ u8 bit)
{
u8 value;
int ret;
- ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, addr, &value);
if (ret)
return ret;
value &= ~bit;
- ret = rtw89_dbi_write8(rtwdev, addr, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, addr, value);
return ret;
}
rtwdev->chip->chip_id == RTL8852C)
return 0;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n",
+ RTW89_PCIE_PHY_RATE);
return ret;
}
return -EOPNOTSUPP;
}
/* Disable L1BD */
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config read %X\n", RTW89_PCIE_L1_CTRL);
return ret;
}
if (bdr_ori & RTW89_PCIE_BIT_L1) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL,
- bdr_ori & ~RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori & ~RTW89_PCIE_BIT_L1);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
l1_flag = true;
}
/* CLK delay = 0 */
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_0);
end:
/* Set L1BD to ori */
if (l1_flag) {
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori);
if (ret) {
- rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ rtw89_err(rtwdev, "[ERR]pci config write %X\n",
+ RTW89_PCIE_L1_CTRL);
return ret;
}
}
if (rtw89_pci_disable_clkreq)
return;
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL,
- PCIE_CLKDLY_HW_30US);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_30US);
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_CLK);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
if (ret)
rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
enable ? "set" : "unset", ret);
if (rtw89_pci_disable_aspm_l1)
return;
- ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
+ ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
- ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
+ ret = rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
if (ret)
rtw89_err(rtwdev, "failed to read ASPM Delay\n");
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
- RTW89_PCIE_BIT_L1);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
if (ret)
rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
enable ? "set" : "unset", ret);
int ret;
if (enable)
- ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
else
- ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
- RTW89_PCIE_BIT_L1SUB);
+ ret = rtw89_pci_config_byte_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
if (ret)
rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
enable ? "set" : "unset", ret);
return;
	/* The hardware needs the register written twice for the setting to take effect */
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
- rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
- RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_pci_write_config_byte(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
}
static int __maybe_unused rtw89_pci_resume(struct device *dev)
.napi_poll = rtw89_pci_napi_poll,
};
-static int rtw89_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ieee80211_hw *hw;
struct rtw89_dev *rtwdev;
+ const struct rtw89_driver_info *info;
int driver_data_size;
int ret;
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
- switch (id->driver_data) {
- case RTL8852A:
- rtwdev->chip = &rtw8852a_chip_info;
- break;
- default:
- return -ENOENT;
- }
+ info = (const struct rtw89_driver_info *)id->driver_data;
+ rtwdev->chip = info->chip;
ret = rtw89_core_init(rtwdev);
if (ret) {
return ret;
}
+EXPORT_SYMBOL(rtw89_pci_probe);
-static void rtw89_pci_remove(struct pci_dev *pdev)
+void rtw89_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct rtw89_dev *rtwdev;
rtw89_core_deinit(rtwdev);
ieee80211_free_hw(hw);
}
-
-static const struct pci_device_id rtw89_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A },
- {},
-};
-MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table);
-
-static struct pci_driver rtw89_pci_driver = {
- .name = "rtw89_pci",
- .id_table = rtw89_pci_id_table,
- .probe = rtw89_pci_probe,
- .remove = rtw89_pci_remove,
- .driver.pm = &rtw89_pm_ops,
-};
-module_pci_driver(rtw89_pci_driver);
+EXPORT_SYMBOL(rtw89_pci_remove);
MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
extern const struct dev_pm_ops rtw89_pm_ops;
+struct pci_device_id;
+
+int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw89_pci_remove(struct pci_dev *pdev);
+
#endif
else if (rssi_lv == 1)
return 0xfffffffffffffff0ULL;
else if (rssi_lv == 2)
- return 0xffffffffffffffe0ULL;
+ return 0xffffffffffffefe0ULL;
else if (rssi_lv == 3)
- return 0xffffffffffffffc0ULL;
+ return 0xffffffffffffcfc0ULL;
else if (rssi_lv == 4)
- return 0xffffffffffffff80ULL;
+ return 0xffffffffffff8f80ULL;
else if (rssi_lv >= 5)
- return 0xffffffffffffff00ULL;
+ return 0xffffffffffff0f00ULL;
return 0xffffffffffffffffULL;
}
+static u64 rtw89_phy_ra_mask_recover(u64 ra_mask, u64 ra_mask_bak)
+{
+ if ((ra_mask & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES)) == 0)
+ ra_mask |= (ra_mask_bak & ~(RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ if (ra_mask == 0)
+ ra_mask |= (ra_mask_bak & (RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES));
+
+ return ra_mask;
+}
+
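Why the backup matters: if the RSSI filter strips every rate, the helper falls back to the saved supp_rates so the firmware is never programmed with an empty mask. A worked example (RA_MASK_HT_1SS is a hypothetical stand-in for one entry of rtw89_ra_mask_ht_rates):

u64 bak  = RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES | RA_MASK_HT_1SS;
u64 mask = rtw89_phy_ra_mask_recover(0, bak);
/* The first test sees no surviving HT/VHT/HE bit and restores the
 * high-rate part of bak, so mask == RA_MASK_HT_1SS.  Only when bak
 * itself carries no high-rate bits does the second test bring the
 * CCK/OFDM bits back instead.
 */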
static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
{
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_ra_info *ra = &rtwsta->ra;
const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
- u64 high_rate_mask = 0;
u64 ra_mask = 0;
+ u64 ra_mask_bak;
u8 mode = 0;
u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
u8 bw_mode = 0;
}
if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
mode |= RTW89_RA_MODE_CCK;
else
mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
} else {
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
mode |= RTW89_RA_MODE_OFDM;
}
+ ra_mask_bak = ra_mask;
+
if (mode >= RTW89_RA_MODE_HT) {
+ u64 mask = 0;
for (i = 0; i < rtwdev->hal.tx_nss; i++)
- high_rate_mask |= high_rate_masks[i];
- ra_mask &= high_rate_mask;
+ mask |= high_rate_masks[i];
if (mode & RTW89_RA_MODE_OFDM)
- ra_mask |= RA_MASK_SUBOFDM_RATES;
+ mask |= RA_MASK_SUBOFDM_RATES;
if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
+ mask |= RA_MASK_SUBCCK_RATES;
+ ra_mask &= mask;
} else if (mode & RTW89_RA_MODE_OFDM) {
- if (mode & RTW89_RA_MODE_CCK)
- ra_mask |= RA_MASK_SUBCCK_RATES;
- ra_mask |= RA_MASK_OFDM_RATES;
- } else {
- ra_mask = RA_MASK_CCK_RATES;
+ ra_mask &= (RA_MASK_OFDM_RATES | RA_MASK_SUBCCK_RATES);
}
- if (mode != RTW89_RA_MODE_CCK) {
+ if (mode != RTW89_RA_MODE_CCK)
ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
- ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
- }
+
+ ra_mask = rtw89_phy_ra_mask_recover(ra_mask, ra_mask_bak);
+ ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_80:
return txsc_idx;
}
+EXPORT_SYMBOL(rtw89_phy_get_txsc);
u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
u32 addr, u32 mask)
addr += rtw89_phy0_phy1_offset(rtwdev, addr);
rtw89_phy_write32_mask(rtwdev, addr, mask, data);
}
+EXPORT_SYMBOL(rtw89_phy_write32_idx);
void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
u32 val)
rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
}
}
+EXPORT_SYMBOL(rtw89_phy_write_reg3_tbl);
const u8 rtw89_rs_idx_max[] = {
[RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
[RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
[RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
};
+EXPORT_SYMBOL(rtw89_rs_idx_max);
const u8 rtw89_rs_nss_max[] = {
[RTW89_RS_CCK] = 1,
[RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
[RTW89_RS_OFFSET] = 1,
};
+EXPORT_SYMBOL(rtw89_rs_nss_max);
static const u8 _byr_of_rs[] = {
[RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
}
}
}
+EXPORT_SYMBOL(rtw89_phy_load_txpwr_byrate);
#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \
({ \
return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_byrate);
static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
{
return min(lmt, sar);
}
+EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
do { \
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit);
static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
u8 ru, u8 ntx, u8 ch)
break;
}
}
+EXPORT_SYMBOL(rtw89_phy_fill_txpwr_limit_ru);
struct rtw89_phy_iter_ra_data {
struct rtw89_dev *rtwdev;
enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
struct rtw89_dig_info *dig = &rtwdev->dig;
u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
- u32 val = 0;
+ u8 ofdm_cca_th;
+ s8 cck_cca_th;
+ u32 pd_val = 0;
under_region += PD_TH_SB_FLTR_CMP_VAL;
case RTW89_CHANNEL_WIDTH_80:
under_region += PD_TH_BW80_CMP_VAL;
break;
+ case RTW89_CHANNEL_WIDTH_160:
+ under_region += PD_TH_BW160_CMP_VAL;
+ break;
case RTW89_CHANNEL_WIDTH_20:
fallthrough;
default:
dig->dyn_pd_th_max = dig->igi_rssi;
final_rssi = min_t(u8, rssi, dig->igi_rssi);
- final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
- PD_TH_MAX_RSSI + under_region);
+ ofdm_cca_th = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
+ PD_TH_MAX_RSSI + under_region);
if (enable) {
- val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1;
+ pd_val = (ofdm_cca_th - under_region - PD_TH_MIN_RSSI) >> 1;
rtw89_debug(rtwdev, RTW89_DBG_DIG,
- "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n",
- dig->igi_rssi, final_rssi, under_region, val);
+ "igi=%d, ofdm_ccaTH=%d, backoff=%d, PD_low=%d\n",
+ final_rssi, ofdm_cca_th, under_region, pd_val);
} else {
rtw89_debug(rtwdev, RTW89_DBG_DIG,
"Dynamic PD th disabled, Set PD_low_bd=0\n");
}
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
- val);
+ pd_val);
rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+
+ if (!rtwdev->hal.support_cckpd)
+ return;
+
+ cck_cca_th = max_t(s8, final_rssi - under_region, CCKPD_TH_MIN_RSSI);
+ pd_val = (u32)(cck_cca_th - IGI_RSSI_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "igi=%d, cck_ccaTH=%d, backoff=%d, cck_PD_low=((%d))dB\n",
+ final_rssi, cck_cca_th, under_region, pd_val);
+
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_EN_V1,
+ B_BMODE_PDTH_LIMIT_EN_MSK_V1, enable);
+ rtw89_phy_write32_mask(rtwdev, R_BMODE_PDTH_V1,
+ B_BMODE_PDTH_LOWER_BOUND_MSK_V1, pd_val);
}
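To make the threshold programming concrete, here is the arithmetic with hypothetical inputs (rssi = 50, igi_rssi = 60, and under_region = 21 after the bandwidth compensation above):

/* final_rssi  = min(50, 60)                = 50
 * ofdm_cca_th = clamp(50, 8 + 21, 70 + 21) = 50
 * pd_val      = (50 - 21 - 8) >> 1         = 10
 *
 * With support_cckpd set: cck_cca_th = max(50 - 21, -18) = 29 and
 * pd_val = (u32)(29 - 110); the masked register write keeps only the
 * low 8 bits, i.e. the two's-complement encoding of -81 dBm.
 */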
void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
#define RXB_IDX_MAX 31
#define RXB_IDX_MIN 0
+#define IGI_RSSI_MAX 110
#define PD_TH_MAX_RSSI 70
#define PD_TH_MIN_RSSI 8
+#define CCKPD_TH_MIN_RSSI (-18)
+#define PD_TH_BW160_CMP_VAL 9
#define PD_TH_BW80_CMP_VAL 6
#define PD_TH_BW40_CMP_VAL 3
#define PD_TH_BW20_CMP_VAL 0
.size = ARRAY_SIZE(_name), \
}
+extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
+extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
+
static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
u32 addr, u8 data)
{
#define R_AX_DTIM_CTRL_P2 0xC4A6
#define R_AX_DTIM_CTRL_P3 0xC4E6
#define R_AX_DTIM_CTRL_P4 0xC526
-#define B_AX_DTIM_NUM_MASK GENMASK(15, 0)
+#define B_AX_DTIM_NUM_MASK GENMASK(15, 8)
#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0)
#define R_AX_TBTT_SHIFT_P0 0xC428
#define B_AX_P0MB2_EN BIT(2)
#define B_AX_P0MB1_EN BIT(1)
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0 0xC590
+#define R_AX_P0MB_HGQ_WINDOW_CFG_0_C1 0xE590
+#define R_AX_PORT_HGQ_WINDOW_CFG 0xC5A0
+#define R_AX_PORT_HGQ_WINDOW_CFG_C1 0xE5A0
+
#define R_AX_AMPDU_AGG_LIMIT 0xC610
#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8)
#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0)
+#define R_AX_MD_TSFT_STMP_CTL 0xCA08
+#define R_AX_MD_TSFT_STMP_CTL_C1 0xEA08
+#define B_AX_TSFT_OFS_MASK GENMASK(31, 16)
+#define B_AX_STMP_THSD_MASK GENMASK(15, 8)
+#define B_AX_UPD_HGQMD BIT(1)
+#define B_AX_UPD_TIMIE BIT(0)
+
#define R_AX_PPWRBIT_SETTING 0xCA0C
#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
#define R_CHBW_MOD 0x4978
#define B_CHBW_MOD_PRICH GENMASK(11, 8)
#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define R_BMODE_PDTH_V1 0x4B64
+#define B_BMODE_PDTH_LOWER_BOUND_MSK_V1 GENMASK(31, 24)
+#define R_BMODE_PDTH_EN_V1 0x4B74
+#define B_BMODE_PDTH_LIMIT_EN_MSK_V1 BIT(30)
#define R_CFO_COMP_SEG1_L 0x5384
#define R_CFO_COMP_SEG1_H 0x5388
#define R_CFO_COMP_SEG1_CTRL 0x538C
static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
[RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
- &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
- [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ &rtw89_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw89_hfc_preccfg_pcie,
+ RTW89_HCIFC_POH},
[RTW89_QTA_INVALID] = {NULL},
};
static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
- [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0,
- &wde_qt0, &ple_qt4, &ple_qt5},
- [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4,
- &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13},
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_wde_size0, &rtw89_ple_size0,
+ &rtw89_wde_qt0, &rtw89_wde_qt0, &rtw89_ple_qt4,
+ &rtw89_ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_wde_size4, &rtw89_ple_size4,
+ &rtw89_wde_qt4, &rtw89_wde_qt4, &rtw89_ple_qt13,
+ &rtw89_ple_qt13},
[RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
NULL},
};
.txpwr_factor_rf = 2,
.txpwr_factor_mac = 1,
.dig_table = &rtw89_8852a_phy_dig_table,
+ .support_bands = BIT(NL80211_BAND_2GHZ) |
+ BIT(NL80211_BAND_5GHZ),
.rf_path_num = 2,
.tx_nss = 2,
.rx_nss = 2,
EXPORT_SYMBOL(rtw8852a_chip_info);
MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin");
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852A driver");
+MODULE_LICENSE("Dual BSD/GPL");
u8 duty_cycle;
};
+extern const struct rtw89_chip_info rtw8852a_chip_info;
+
void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
struct rtw8852a_bb_pmac_info *tx_info,
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020-2021 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "rtw8852a.h"
+
+static const struct rtw89_driver_info rtw89_8852ae_info = {
+ .chip = &rtw8852a_chip_info,
+};
+
+static const struct pci_device_id rtw89_8852ae_id_table[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a),
+ .driver_data = (kernel_ulong_t)&rtw89_8852ae_info,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_8852ae_id_table);
+
+static struct pci_driver rtw89_8852ae_driver = {
+ .name = "rtw89_8852ae",
+ .id_table = rtw89_8852ae_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_8852ae_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852AE driver");
+MODULE_LICENSE("Dual BSD/GPL");
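The point of exporting the probe/remove pair is that every future chip becomes a thin module like the one above. A hypothetical follow-on module (all 8852C names and the device ID below are assumptions, not part of this patch):

static const struct rtw89_driver_info rtw89_8852ce_info = {
	.chip = &rtw8852c_chip_info,	/* hypothetical descriptor */
};

static const struct pci_device_id rtw89_8852ce_id_table[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xc852), /* assumed ID */
		.driver_data = (kernel_ulong_t)&rtw89_8852ce_info,
	},
	{},
};

static struct pci_driver rtw89_8852ce_driver = {
	.name = "rtw89_8852ce",
	.id_table = rtw89_8852ce_id_table,
	.probe = rtw89_pci_probe,	/* shared PCI glue, unchanged */
	.remove = rtw89_pci_remove,
	.driver.pm = &rtw89_pm_ops,
};
module_pci_driver(rtw89_8852ce_driver);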
#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
+#define RTW89_TXWD_BODY0_HW_SSN_SEL GENMASK(3, 2)
+#define RTW89_TXWD_BODY0_HW_SSN_MODE GENMASK(1, 0)
/* TX WD BODY DWORD 1 */
#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16)
{
int ret;
struct wsm_buf *buf = &priv->wsm_cmd_buf;
- u8 queue_id_to_wmm_aci[] = {3, 2, 0, 1};
+ static const u8 queue_id_to_wmm_aci[] = { 3, 2, 0, 1 };
wsm_cmd_lock(priv);
void ipc_debugfs_init(struct iosm_imem *ipc_imem)
{
- struct dentry *debugfs_pdev = wwan_get_debugfs_dir(ipc_imem->dev);
+ ipc_imem->debugfs_wwan_dir = wwan_get_debugfs_dir(ipc_imem->dev);
ipc_imem->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
- debugfs_pdev);
+ ipc_imem->debugfs_wwan_dir);
ipc_imem->trace = ipc_trace_init(ipc_imem);
if (!ipc_imem->trace)
{
ipc_trace_deinit(ipc_imem->trace);
debugfs_remove_recursive(ipc_imem->debugfs_dir);
+ wwan_put_debugfs_dir(ipc_imem->debugfs_wwan_dir);
}
return HRTIMER_NORESTART;
}
+static int ipc_imem_tq_adb_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_mux_ul_adb_finish(ipc_imem->mux);
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_adb_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, adb_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_adb_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
struct ipc_mux_config *cfg)
{
ipc_mmio_update_cp_capability(ipc_imem->mmio);
- if (!ipc_imem->mmio->has_mux_lite) {
+ if (ipc_imem->mmio->mux_protocol == MUX_UNKNOWN) {
dev_err(ipc_imem->dev, "Failed to get Mux capability.");
return -EINVAL;
}
- cfg->protocol = MUX_LITE;
+ cfg->protocol = ipc_imem->mmio->mux_protocol;
cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
MUX_UL_ON_CREDITS :
IPC_MSG_PREP_FEATURE_SET, &prep_args);
}
+/**
+ * ipc_imem_td_update_timer_start - Start the TD update timer if it is not already running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
{
/* Use the TD update timer only in the runtime phase */
hrtimer_cancel(hr_timer);
}
+/**
+ * ipc_imem_adb_timer_start - Start the ADB timer if it is not already running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem)
+{
+ if (!hrtimer_active(&ipc_imem->adb_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC);
+ hrtimer_start(&ipc_imem->adb_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7360_ID &&
+ chnl_cfg_port.wwan_port_type == WWAN_PORT_MBIM) {
+ ctrl_chl_idx++;
+ continue;
+ }
if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
chnl_cfg_port,
}
/* Try to generate new ADB or ADGH. */
- if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ if (ipc_mux_ul_data_encode(ipc_imem->mux)) {
ipc_imem_td_update_timer_start(ipc_imem);
+ if (ipc_imem->mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_imem);
+ }
/* Continue the send procedure with accumulated SIO or NETIF packets.
* Reset the debounce flags.
HRTIMER_MODE_REL);
ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+ hrtimer_init(&ipc_imem->adb_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ipc_imem->adb_timer.function = ipc_imem_adb_timer_cb;
+
if (ipc_imem_config(ipc_imem)) {
dev_err(ipc_imem->dev, "failed to initialize the imem");
goto imem_config_fail;
* @tdupdate_timer: Delay the TD update doorbell.
* @fast_update_timer: forced head pointer update delay timer.
* @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @adb_timer: Timer for finishing the ADB.
* @rom_exit_code: Mapped boot rom exit code.
* @enter_runtime: 1 means the transition to runtime phase was
* executed.
* @ev_mux_net_transmit_pending:0 means inform the IPC tasklet to pass
* @reset_det_n: Reset detect flag
* @pcie_wake_n: Pcie wake flag
+ * @debugfs_wwan_dir: WWAN Debug FS directory entry
* @debugfs_dir: Debug FS directory for driver-specific entries
*/
struct iosm_imem {
struct hrtimer tdupdate_timer;
struct hrtimer fast_update_timer;
struct hrtimer td_alloc_timer;
+ struct hrtimer adb_timer;
enum rom_exit_code rom_exit_code;
u32 enter_runtime;
struct completion ul_pend_sem;
reset_det_n:1,
pcie_wake_n:1;
#ifdef CONFIG_WWAN_DEBUGFS
+ struct dentry *debugfs_wwan_dir;
struct dentry *debugfs_dir;
#endif
};
* Returns: 0 on success, -1 on failure
*/
int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
+
+void ipc_imem_adb_timer_start(struct iosm_imem *ipc_imem);
+
#endif
#include <linux/slab.h>
#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_mux.h"
/* Definition of MMIO offsets
* note that MMIO_CI offsets are relative to end of chip info structure
ver = ipc_mmio_get_cp_version(ipc_mmio);
cp_cap = ioread32(ipc_mmio->base + ipc_mmio->offset.cp_capability);
- ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
- !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
+ ipc_mmio->mux_protocol = ((ver >= IOSM_CP_VERSION) && (cp_cap &
+ (UL_AGGR | DL_AGGR))) ? MUX_AGGREGATION
+ : MUX_LITE;
ipc_mmio->has_ul_flow_credit =
(ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
* @context_info_addr: Physical base address of context info structure
* @chip_info_version: Version of chip info structure
* @chip_info_size: Size of chip info structure
- * @has_mux_lite: It doesn't support mux aggergation
+ * @mux_protocol: Active MUX protocol (MUX_LITE or MUX_AGGREGATION)
* @has_ul_flow_credit: Ul flow credit support
* @has_slp_no_prot: Device sleep no protocol support
* @has_mcr_support: Usage of mcr support
phys_addr_t context_info_addr;
unsigned int chip_info_version;
unsigned int chip_info_size;
- u8 has_mux_lite:1,
- has_ul_flow_credit:1,
+ u32 mux_protocol;
+ u8 has_ul_flow_credit:1,
has_slp_no_prot:1,
has_mcr_support:1;
};
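A quick worked example of the new capability decode (bit names as used in the hunk above):

/* cp_cap = UL_AGGR | DL_AGGR | UL_FLOW_CREDIT; with
 * ver >= IOSM_CP_VERSION this yields
 *	ipc_mmio->mux_protocol       = MUX_AGGREGATION;
 *	ipc_mmio->has_ul_flow_credit = 1;
 * while a CP advertising neither AGGR bit falls back to MUX_LITE.
 */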
struct iosm_imem *imem)
{
struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
- int i, ul_tds, ul_td_size;
+ int i, j, ul_tds, ul_td_size;
struct sk_buff_head *free_list;
struct sk_buff *skb;
+ int qlt_size;
if (!ipc_mux)
return NULL;
ipc_mux->channel_id = -1;
ipc_mux->channel = NULL;
+ if (ipc_mux->protocol != MUX_LITE) {
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < IPC_MEM_MUX_IP_SESSION_ENTRIES; i++) {
+ ipc_mux->ul_adb.pp_qlt[i] = kzalloc(qlt_size,
+ GFP_ATOMIC);
+			if (!ipc_mux->ul_adb.pp_qlt[i]) {
+				for (j = i - 1; j >= 0; j--)
+					kfree(ipc_mux->ul_adb.pp_qlt[j]);
+				/* also free the mux instance allocated above */
+				kfree(ipc_mux);
+				return NULL;
+			}
+ }
+
+ ul_td_size = IPC_MEM_MAX_UL_ADB_BUF_SIZE;
+ ul_tds = IPC_MEM_MAX_TDS_MUX_AGGR_UL;
+ }
+
/* Allocate the list of UL ADB. */
for (i = 0; i < ul_tds; i++) {
dma_addr_t mapping;
#include "iosm_ipc_protocol.h"
-/* Size of the buffer for the IP MUX data buffer. */
-#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
-#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+#define IPC_MEM_MAX_UL_DG_ENTRIES 100
+#define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60
+
+#define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
+#define IPC_MEM_MAX_DL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE
/* Size of the buffer for the IP MUX Lite data buffer. */
#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
enum ipc_mux_protocol {
MUX_UNKNOWN,
MUX_LITE,
+ MUX_AGGREGATION,
};
/* Supported UL data transfer methods. */
flush:1; /* flush net interface ? */
};
-/* State of a single UL data block. */
+/**
+ * struct mux_adth_dg - Structure of the datagram in the Aggregated Datagram
+ * Table Header.
+ * @datagram_index : Index (in bytes) to the k-th datagram in the table.
+ * Index shall count from the start of the block including
+ * the 16-byte header. This value shall be non-zero.
+ * @datagram_length: Length of the k-th datagram including the head padding.
+ * This value shall be non-zero.
+ * @service_class: Service class identifier for the datagram.
+ * @reserved: Reserved bytes. Set to zero
+ */
+struct mux_adth_dg {
+ __le32 datagram_index;
+ __le16 datagram_length;
+ u8 service_class;
+ u8 reserved;
+};
+
+/**
+ * struct mux_qlth_ql - Structure of the queue level in the Aggregated
+ * Datagram Queue Level Table Header.
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_qlth_ql {
+ __le32 nr_of_bytes;
+};
+
+/**
+ * struct mux_qlth - Structure of Aggregated Datagram Queue Level Table
+ * Header.
+ * @signature: Signature of the Queue Level Table Header
+ * Value: 0x48544C51 (ASCII characters: 'Q' 'L' 'T' 'H')
+ * @table_length: Length (in bytes) of the datagram table. This length
+ * shall include the queue level table header size.
+ * Minimum value: 0x10
+ * @if_id: ID of the interface the queue levels in the table
+ * belong to.
+ * @reserved: Reserved byte. Set to zero.
+ * @next_table_index: Index (in bytes) to the next table in the buffer. Index
+ * shall count from the start of the block including the
+ * 16-byte header. Value of zero indicates end of the list.
+ * @reserved2: Reserved bytes. Set to zero
+ * @ql: Queue level table with variable length
+ */
+struct mux_qlth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_qlth_ql ql;
+};
+
+/**
+ * struct mux_adb - Structure of State of a single UL data block.
+ * @dest_skb: Current UL skb for the data block.
+ * @buf: ADB memory
+ * @adgh: ADGH pointer
+ * @qlth_skb: QLTH pointer
+ * @next_table_index: Pointer to next table index.
+ * @free_list: List of allocated ADBs for the UL session
+ * @size: Size of the ADB memory.
+ * @if_cnt: Statistic counter
+ * @dg_cnt_total: Datagram count total
+ * @payload_size: Payload Size
+ * @dg: Datagram table.
+ * @pp_qlt: Per-session pointers to the Queue Level Tables
+ * @adbh: ADBH pointer
+ * @qlt_updated: Queue level table updated
+ * @dg_count: Datagram count
+ */
struct mux_adb {
- struct sk_buff *dest_skb; /* Current UL skb for the data block. */
- u8 *buf; /* ADB memory. */
- struct mux_adgh *adgh; /* ADGH pointer */
- struct sk_buff *qlth_skb; /* QLTH pointer */
- u32 *next_table_index; /* Pointer to next table index. */
- struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
- int size; /* Size of the ADB memory. */
- u32 if_cnt; /* Statistic counter */
+ struct sk_buff *dest_skb;
+ u8 *buf;
+ struct mux_adgh *adgh;
+ struct sk_buff *qlth_skb;
+ u32 *next_table_index;
+ struct sk_buff_head free_list;
+ int size;
+ u32 if_cnt;
u32 dg_cnt_total;
u32 payload_size;
+ struct mux_adth_dg
+ dg[IPC_MEM_MUX_IP_SESSION_ENTRIES][IPC_MEM_MAX_UL_DG_ENTRIES];
+ struct mux_qlth *pp_qlt[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct mux_adbh *adbh;
+ u32 qlt_updated[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ u32 dg_count[IPC_MEM_MUX_IP_SESSION_ENTRIES];
};
-/* Temporary ACB state. */
+/**
+ * struct mux_acb - Structure of Temporary ACB state.
+ * @skb: Used UL skb.
+ * @if_id: Session id.
+ * @buf_p: Command buffer.
+ * @wanted_response: Expected response to the last command
+ * @got_response: Response actually received
+ * @cmd: Current command code
+ * @got_param: Received command/response parameter
+ */
struct mux_acb {
struct sk_buff *skb; /* Used UL skb. */
int if_id; /* Session id. */
+ u8 *buf_p;
u32 wanted_response;
u32 got_response;
u32 cmd;
* @wwan_q_offset: This will hold the offset of the given instance
* Useful while passing or receiving packets from
* wwan/imem layer.
+ * @adb_finish_timer: Timer for forcefully finishing the ADB
+ * @acb_tx_sequence_nr: Sequence number for the ACB header.
+ * @params: user configurable parameters
+ * @adb_tx_sequence_nr: Sequence number for ADB header
+ * @acc_adb_size: Statistic data for logging
+ * @acc_payload_size: Statistic data for logging
* @initialized: MUX object is initialized
* @ev_mux_net_transmit_pending:
* 0 means inform the IPC tasklet to pass the
long long ul_data_pend_bytes;
struct mux_acb acb;
int wwan_q_offset;
+ struct hrtimer adb_finish_timer;
+ u16 acb_tx_sequence_nr;
+ struct ipc_params *params;
+ u16 adb_tx_sequence_nr;
+ unsigned long long acc_adb_size;
+ unsigned long long acc_payload_size;
u8 initialized:1,
ev_mux_net_transmit_pending:1,
- adb_prep_ongoing:1;
-};
+ adb_prep_ongoing;
+} __packed;
/* MUX configuration structure */
struct ipc_mux_config {
return 0;
}
+/* Initialize the command header. */
+static void ipc_mux_acb_init(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_acbh *header;
+
+ header = (struct mux_acbh *)(acb->skb)->data;
+ header->block_length = cpu_to_le32(sizeof(struct mux_acbh));
+ header->first_cmd_index = header->block_length;
+ header->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ACBH);
+ header->sequence_nr = cpu_to_le16(ipc_mux->acb_tx_sequence_nr++);
+}
+
+/* Add a command to the ACB. */
+static struct mux_cmdh *ipc_mux_acb_add_cmd(struct iosm_mux *ipc_mux, u32 cmd,
+ void *param, u32 param_size)
+{
+ struct mux_acbh *header;
+ struct mux_cmdh *cmdh;
+ struct mux_acb *acb;
+
+ acb = &ipc_mux->acb;
+ header = (struct mux_acbh *)(acb->skb)->data;
+ cmdh = (struct mux_cmdh *)
+ ((acb->skb)->data + le32_to_cpu(header->block_length));
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le32_to_cpu(header->block_length) +
+ le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
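Taken together, the two helpers are used as in ipc_mux_dl_acb_send_cmds() further down. A condensed sketch (cmdh, transaction_id, and ret are assumed locals, and an ACB skb is assumed to have been allocated into acb->skb by the existing allocation path):

ipc_mux_acb_init(ipc_mux);		/* write the ACBH            */
cmdh = ipc_mux_acb_add_cmd(ipc_mux, IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK,
			   NULL, 0);	/* append one command        */
cmdh->transaction_id = cpu_to_le32(transaction_id);
ret = ipc_mux_acb_send(ipc_mux, false);	/* hand the block to CP      */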
/* Prepare mux Command */
static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
u32 cmd, struct mux_acb *acb,
size_t res_size, bool blocking, bool respond)
{
struct mux_acb *acb = &ipc_mux->acb;
- struct mux_lite_cmdh *ack_lite;
+ union mux_type_cmdh cmdh;
int ret = 0;
acb->if_id = if_id;
if (ret)
return ret;
- ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
- res_size);
- if (respond)
- ack_lite->transaction_id = cpu_to_le32(transaction_id);
+ if (ipc_mux->protocol == MUX_LITE) {
+ cmdh.ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb,
+ param, res_size);
+ if (respond)
+ cmdh.ack_lite->transaction_id =
+ cpu_to_le32(transaction_id);
+ } else {
+ /* Initialize the ACB header. */
+ ipc_mux_acb_init(ipc_mux);
+ cmdh.ack_aggr = ipc_mux_acb_add_cmd(ipc_mux, cmd_type, param,
+ res_size);
+
+ if (respond)
+ cmdh.ack_aggr->transaction_id =
+ cpu_to_le32(transaction_id);
+ }
ret = ipc_mux_acb_send(ipc_mux, blocking);
return ret;
}
static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+ union mux_cmd_param param,
+ __le32 command_type, u8 if_id,
+ __le32 transaction_id)
{
struct mux_acb *acb = &ipc_mux->acb;
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_CMD_OPEN_SESSION_RESP:
case MUX_CMD_CLOSE_SESSION_RESP:
/* Resume the control application. */
- acb->got_param = cmdh->param;
+ acb->got_param = param;
break;
case MUX_LITE_CMD_FLOW_CTL_ACK:
if (ipc_mux->protocol != MUX_LITE)
return -EINVAL;
- dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
- cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ dev_dbg(ipc_mux->dev, "if_id %u FLOW_CTL_ACK %u received",
+ if_id, le32_to_cpu(transaction_id));
+ break;
+
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK:
+ /* This command type is not expected as response for
+ * Lite version of the protocol. So return non-zero.
+ */
+ if (ipc_mux->protocol == MUX_LITE)
+ return -EINVAL;
break;
default:
}
acb->wanted_response = MUX_CMD_INVALID;
- acb->got_response = le32_to_cpu(cmdh->command_type);
+ acb->got_response = le32_to_cpu(command_type);
complete(&ipc_mux->channel->ul_sem);
return 0;
}
-static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
- struct mux_lite_cmdh *cmdh)
+static int ipc_mux_dl_cmds_decode_process(struct iosm_mux *ipc_mux,
+ union mux_cmd_param *param,
+ __le32 command_type, u8 if_id,
+ __le16 cmd_len, int size)
{
- union mux_cmd_param *param = &cmdh->param;
struct mux_session *session;
- int new_size;
+ struct hrtimer *adb_timer;
dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
- cmdh->if_id, le32_to_cpu(cmdh->command_type));
+ if_id, le32_to_cpu(command_type));
- switch (le32_to_cpu(cmdh->command_type)) {
+ switch (le32_to_cpu(command_type)) {
case MUX_LITE_CMD_FLOW_CTL:
+ case IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE:
- if (cmdh->if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES) {
dev_err(ipc_mux->dev, "if_id [%d] not valid",
- cmdh->if_id);
+ if_id);
return -EINVAL; /* No session interface id. */
}
- session = &ipc_mux->session[cmdh->if_id];
+ session = &ipc_mux->session[if_id];
+ adb_timer = &ipc_mux->imem->adb_timer;
- new_size = offsetof(struct mux_lite_cmdh, param) +
- sizeof(param->flow_ctl);
if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
* to limit uplink session queueing
*/
session->net_tx_stop = true;
+
+			/* Finish the ADB here. Otherwise data already
+			 * queued for this session would only reach CP
+			 * once the ADB fills up for some other session.
+			 */
+ if (ipc_mux->protocol == MUX_AGGREGATION) {
+ ipc_mux_ul_adb_finish(ipc_mux);
+ ipc_imem_hrtimer_stop(adb_timer);
+ }
/* Update the stats */
session->flow_ctl_en_cnt++;
} else if (param->flow_ctl.mask == 0) {
* our internal Tx flag and enabling kernel
* flow control
*/
+ dev_dbg(ipc_mux->dev, "if_id[%u] flow_ctl mask 0x%08X",
+ if_id, le32_to_cpu(param->flow_ctl.mask));
/* Backward Compatibility */
- if (cmdh->cmd_len == cpu_to_le16(new_size))
+ if (cmd_len == cpu_to_le16(size))
session->flow_ctl_mask =
le32_to_cpu(param->flow_ctl.mask);
else
break;
}
- dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ ipc_mux->acc_adb_size = 0;
+ ipc_mux->acc_payload_size = 0;
+
+ dev_dbg(ipc_mux->dev, "if_id[%u] FLOW CTRL 0x%08X", if_id,
le32_to_cpu(param->flow_ctl.mask));
break;
{
struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
__le32 trans_id = cmdh->transaction_id;
+ int size;
- if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->transaction_id)) {
/* Unable to decode command response indicates the cmd_type
	 * may be a command instead of a response, so try to decode it.
*/
- if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->cmd_len, size)) {
/* Decoded command may need a response. Give the
* response according to the command type.
*/
adgh = (struct mux_adgh *)block;
- if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ if (adgh->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "invalid ADGH signature received");
return;
}
ipc_mux->session[if_id].flush = 1;
}
+static void ipc_mux_dl_acbcmd_decode(struct iosm_mux *ipc_mux,
+ struct mux_cmdh *cmdh, int size)
+{
+ u32 link_st = IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP;
+ u32 fctl_dis = IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE;
+ u32 fctl_ena = IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE;
+ u32 fctl_ack = IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK;
+ union mux_cmd_param *cmd_p = NULL;
+ u32 cmd = link_st;
+ u32 trans_id;
+
+ if (!ipc_mux_dl_cmds_decode_process(ipc_mux, &cmdh->param,
+ cmdh->command_type, cmdh->if_id,
+ cmdh->cmd_len, size)) {
+ size = 0;
+ if (cmdh->command_type == cpu_to_le32(link_st)) {
+ cmd_p = &cmdh->param;
+ cmd_p->link_status_resp.response = MUX_CMD_RESP_SUCCESS;
+ } else if ((cmdh->command_type == cpu_to_le32(fctl_ena)) ||
+ (cmdh->command_type == cpu_to_le32(fctl_dis))) {
+ cmd = fctl_ack;
+ } else {
+ return;
+ }
+ trans_id = le32_to_cpu(cmdh->transaction_id);
+ ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ trans_id, cmd_p, size, false, true);
+ }
+}
+
+/* Decode an aggregated command block. */
+static void ipc_mux_dl_acb_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_acbh *acbh;
+ struct mux_cmdh *cmdh;
+ u32 next_cmd_index;
+ u8 *block;
+ int size;
+
+ acbh = (struct mux_acbh *)(skb->data);
+ block = (u8 *)(skb->data);
+
+ next_cmd_index = le32_to_cpu(acbh->first_cmd_index);
+ next_cmd_index = array_index_nospec(next_cmd_index,
+ sizeof(struct mux_cmdh));
+
+ while (next_cmd_index != 0) {
+ cmdh = (struct mux_cmdh *)&block[next_cmd_index];
+ next_cmd_index = le32_to_cpu(cmdh->next_cmd_index);
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh->param,
+ cmdh->command_type,
+ cmdh->if_id,
+ cmdh->transaction_id)) {
+ size = offsetof(struct mux_cmdh, param) +
+ sizeof(cmdh->param.flow_ctl);
+ ipc_mux_dl_acbcmd_decode(ipc_mux, cmdh, size);
+ }
+ }
+}
+
+/* Process the datagrams of a single aggregated datagram table. */
+static int mux_dl_process_dg(struct iosm_mux *ipc_mux, struct mux_adbh *adbh,
+ struct mux_adth_dg *dg, struct sk_buff *skb,
+ int if_id, int nr_of_dg)
+{
+ u32 dl_head_pad_len = ipc_mux->session[if_id].dl_head_pad_len;
+ u32 packet_offset, i, rc;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index)
+ < sizeof(struct mux_adbh))
+ goto dg_error;
+
+ /* Is the packet inside of the ADB */
+ if (le32_to_cpu(dg->datagram_index) >=
+ le32_to_cpu(adbh->block_length)) {
+ goto dg_error;
+ } else {
+ packet_offset =
+ le32_to_cpu(dg->datagram_index) +
+ dl_head_pad_len;
+ /* Pass the packet to the netif layer. */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, ipc_mux->wwan,
+ packet_offset,
+ dg->service_class,
+ skb);
+ if (rc)
+ goto dg_error;
+ }
+ }
+ return 0;
+dg_error:
+ return -1;
+}
+
+/* Decode an aggregated data block. */
+static void mux_dl_adb_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ struct mux_adth_dg *dg;
+ struct iosm_wwan *wwan;
+ struct mux_adbh *adbh;
+ struct mux_adth *adth;
+ int nr_of_dg, if_id;
+ u32 adth_index;
+ u8 *block;
+
+ block = skb->data;
+ adbh = (struct mux_adbh *)block;
+
+ /* Process the aggregated datagram tables. */
+ adth_index = le32_to_cpu(adbh->first_table_index);
+
+ /* Has CP sent an empty ADB ? */
+ if (adth_index < 1) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ goto adb_decode_err;
+ }
+
+ /* Loop through mixed session tables. */
+ while (adth_index) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)(block + adth_index);
+
+ /* Get the interface id and map it to the netif id. */
+ if_id = adth->if_id;
+ if (if_id >= IPC_MEM_MUX_IP_SESSION_ENTRIES)
+ goto adb_decode_err;
+
+ if_id = array_index_nospec(if_id,
+ IPC_MEM_MUX_IP_SESSION_ENTRIES);
+
+ /* Is the session active ? */
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan)
+ goto adb_decode_err;
+
+ /* Consistency checks for aggregated datagram table. */
+ if (adth->signature != cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH))
+ goto adb_decode_err;
+
+ if (le16_to_cpu(adth->table_length) < (sizeof(struct mux_adth) -
+ sizeof(struct mux_adth_dg)))
+ goto adb_decode_err;
+
+ /* Calculate the number of datagrams. */
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ /* Is the datagram table empty ? */
+ if (nr_of_dg < 1) {
+ dev_err(ipc_mux->dev,
+ "adthidx=%u,nr_of_dg=%d,next_tblidx=%u",
+ adth_index, nr_of_dg,
+ le32_to_cpu(adth->next_table_index));
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ continue;
+ }
+
+ /* New aggregated datagram table. */
+ dg = &adth->dg;
+ if (mux_dl_process_dg(ipc_mux, adbh, dg, skb, if_id,
+ nr_of_dg) < 0)
+ goto adb_decode_err;
+
+ /* mark session for final flush */
+ ipc_mux->session[if_id].flush = 1;
+
+ /* Move to the next aggregated datagram table. */
+ adth_index = le32_to_cpu(adth->next_table_index);
+ }
+
+adb_decode_err:
+ return;
+}
+
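For orientation, the block the decoder walks has this shape, reconstructed from the structures above (offsets illustrative):

/*
 *  +------------------+ 0
 *  | struct mux_adbh  |--first_table_index-------------+
 *  +------------------+                                |
 *  | datagram payload |                                |
 *  +------------------+ <------------------------------+
 *  | struct mux_adth  |  dg[0..n-1] index into the payload
 *  +------------------+--next_table_index--------------+
 *  | struct mux_qlth  | <------------------------------+
 *  +------------------+  next_table_index == 0 ends the walk
 */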
+/**
+ * ipc_mux_dl_decode - Route the DL packet through the IP MUX layer
+ * depending on the block signature.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
{
u32 signature;
signature = le32_to_cpup((__le32 *)skb->data);
switch (signature) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH: /* Aggregated Data Block Header */
+ mux_dl_adb_decode(ipc_mux, skb);
+ break;
+ case IOSM_AGGR_MUX_SIG_ADGH:
ipc_mux_dl_adgh_decode(ipc_mux, skb);
break;
-
case MUX_SIG_FCTH:
ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
break;
-
+ case IOSM_AGGR_MUX_SIG_ACBH: /* Aggregated Command Block Header */
+ ipc_mux_dl_acb_decode(ipc_mux, skb);
+ break;
case MUX_SIG_CMDH:
ipc_mux_dl_cmd_decode(ipc_mux, skb);
break;
{
/* Take the first element of the free list. */
struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ u32 no_if = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+ u32 *next_tb_id;
int qlt_size;
+ u32 if_id;
if (!skb)
return -EBUSY; /* Wait for a free ADB skb. */
IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
switch (type) {
- case MUX_SIG_ADGH:
+ case IOSM_AGGR_MUX_SIG_ADBH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_ADB_BUF_SIZE;
+
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ /* Initialize the ADBH. */
+ ul_adb->adbh = (struct mux_adbh *)ul_adb->buf;
+ memset(ul_adb->adbh, 0, sizeof(struct mux_adbh));
+ ul_adb->adbh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADBH);
+ ul_adb->adbh->block_length =
+ cpu_to_le32(sizeof(struct mux_adbh));
+ next_tb_id = (unsigned int *)&ul_adb->adbh->first_table_index;
+ ul_adb->next_table_index = next_tb_id;
+
+ /* Clear the local copy of DGs for new ADB */
+ memset(ul_adb->dg, 0, sizeof(ul_adb->dg));
+
+ /* Clear the DG count and QLT updated status for new ADB */
+ for (if_id = 0; if_id < no_if; if_id++) {
+ ul_adb->dg_count[if_id] = 0;
+ ul_adb->qlt_updated[if_id] = 0;
+ }
+ break;
+
+ case IOSM_AGGR_MUX_SIG_ADGH:
/* Save the ADB memory settings. */
ul_adb->dest_skb = skb;
ul_adb->buf = skb->data;
str, bytes);
}
+static void ipc_mux_ul_encode_adth(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, int *out_offset)
+{
+ int i, qlt_size, offset = *out_offset;
+ struct mux_qlth *p_adb_qlt;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u16 adth_dg_size;
+ u32 *next_tb_id;
+
+ qlt_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ if (ul_adb->dg_count[i] > 0) {
+ adth_dg_size = offsetof(struct mux_adth, dg) +
+ ul_adb->dg_count[i] * sizeof(*dg);
+
+ *ul_adb->next_table_index = offset;
+ adth = (struct mux_adth *)&ul_adb->buf[offset];
+ next_tb_id = (unsigned int *)&adth->next_table_index;
+ ul_adb->next_table_index = next_tb_id;
+ offset += adth_dg_size;
+ adth->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH);
+ adth->if_id = i;
+ adth->table_length = cpu_to_le16(adth_dg_size);
+ adth_dg_size -= offsetof(struct mux_adth, dg);
+ memcpy(&adth->dg, ul_adb->dg[i], adth_dg_size);
+ ul_adb->if_cnt++;
+ }
+
+ if (ul_adb->qlt_updated[i]) {
+ *ul_adb->next_table_index = offset;
+ p_adb_qlt = (struct mux_qlth *)&ul_adb->buf[offset];
+ ul_adb->next_table_index =
+ (u32 *)&p_adb_qlt->next_table_index;
+ memcpy(p_adb_qlt, ul_adb->pp_qlt[i], qlt_size);
+ offset += qlt_size;
+ }
+ }
+ *out_offset = offset;
+}
+
+/**
+ * ipc_mux_ul_adb_finish - Add the TD for the aggregated session packets to the TDR.
+ * @ipc_mux: Pointer to MUX data-struct.
+ */
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux)
+{
+ bool ul_data_pend = false;
+ struct mux_adb *ul_adb;
+ unsigned long flags;
+ int offset;
+
+ ul_adb = &ipc_mux->ul_adb;
+ if (!ul_adb->dest_skb)
+ return;
+
+ offset = *ul_adb->next_table_index;
+ ipc_mux_ul_encode_adth(ipc_mux, ul_adb, &offset);
+ ul_adb->adbh->block_length = cpu_to_le32(offset);
+
+ if (le32_to_cpu(ul_adb->adbh->block_length) > ul_adb->size) {
+ ul_adb->dest_skb = NULL;
+ return;
+ }
+
+ *ul_adb->next_table_index = 0;
+ ul_adb->adbh->sequence_nr = cpu_to_le16(ipc_mux->adb_tx_sequence_nr++);
+ skb_put(ul_adb->dest_skb, le32_to_cpu(ul_adb->adbh->block_length));
+
+	spin_lock_irqsave(&ipc_mux->channel->ul_list.lock, flags);
+	__skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+	spin_unlock_irqrestore(&ipc_mux->channel->ul_list.lock, flags);
+
+ ul_adb->dest_skb = NULL;
+ /* Updates the TDs with ul_list */
+ ul_data_pend = ipc_imem_ul_write_td(ipc_mux->imem);
+
+ /* Delay the doorbell irq */
+ if (ul_data_pend)
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ ipc_mux->acc_adb_size += le32_to_cpu(ul_adb->adbh->block_length);
+ ipc_mux->acc_payload_size += ul_adb->payload_size;
+ ipc_mux->ul_data_pend_bytes += ul_adb->payload_size;
+}
+
/* Allocates an ADB from the free list and initializes it with ADBH */
static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
struct mux_adb *adb, int *size_needed,
while (nr_of_pkts > 0) {
/* get destination skb allocated */
if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
- MUX_SIG_ADGH)) {
+ IOSM_AGGR_MUX_SIG_ADGH)) {
dev_err(ipc_mux->dev, "no reserved memory for ADGH");
return -ENOMEM;
}
memcpy(adb->buf + offset + pad_len, src_skb->data,
src_skb->len);
- adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->signature = cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH);
adb->adgh->if_id = session_id;
adb->adgh->length =
cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
return adb_updated;
}
+/**
+ * ipc_mux_ul_adb_update_ql - Adds Queue Level Table and Queue Level to ADB
+ * @ipc_mux: pointer to MUX instance data
+ * @p_adb: pointer to UL aggregated data block
+ * @session_id: session id
+ * @qlth_n_ql_size: Length (in bytes) of the queue level table (QLTH plus QL entries)
+ * @ul_list: pointer to skb buffer head
+ */
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list)
+{
+ int qlevel = ul_list->qlen;
+ struct mux_qlth *p_qlt;
+
+ p_qlt = (struct mux_qlth *)p_adb->pp_qlt[session_id];
+
+ /* Initialize QLTH if not been done */
+ if (p_adb->qlt_updated[session_id] == 0) {
+ p_qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ p_qlt->if_id = session_id;
+ p_qlt->table_length = cpu_to_le16(qlth_n_ql_size);
+ p_qlt->reserved = 0;
+ p_qlt->reserved2 = 0;
+ }
+
+ /* Update Queue Level information always */
+ p_qlt->ql.nr_of_bytes = cpu_to_le32(qlevel);
+ p_adb->qlt_updated[session_id] = 1;
+}
+
+/* Update the next table index. */
+static int mux_ul_dg_update_tbl_index(struct iosm_mux *ipc_mux,
+ int session_id,
+ struct sk_buff_head *ul_list,
+ struct mux_adth_dg *dg,
+ int aligned_size,
+ u32 qlth_n_ql_size,
+ struct mux_adb *adb,
+ struct sk_buff *src_skb)
+{
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ ipc_mux_ul_adb_finish(ipc_mux);
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH)) {
+ dev_kfree_skb(src_skb);
+ return -ENOMEM;
+ }
+ ipc_mux->size_needed = le32_to_cpu(adb->adbh->block_length);
+
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+ return 0;
+}
+
+/* Process encode session UL data. */
+static int mux_ul_dg_encode(struct iosm_mux *ipc_mux, struct mux_adb *adb,
+ struct mux_adth_dg *dg,
+ struct sk_buff_head *ul_list,
+ struct sk_buff *src_skb, int session_id,
+ int pkt_to_send, u32 qlth_n_ql_size,
+ int *out_offset, int head_pad_len)
+{
+ int aligned_size;
+ int offset = *out_offset;
+ unsigned long flags;
+ int nr_of_skb = 0;
+
+ while (pkt_to_send > 0) {
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+				"skb_peek returned NULL with count: %d",
+ pkt_to_send);
+ return -1;
+ }
+ aligned_size = ALIGN((head_pad_len + src_skb->len), 4);
+ ipc_mux->size_needed += sizeof(*dg) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size ||
+ ((ipc_mux->size_needed + ipc_mux->ul_data_pend_bytes) >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B)) {
+ *adb->next_table_index = offset;
+ if (mux_ul_dg_update_tbl_index(ipc_mux, session_id,
+ ul_list, dg,
+ aligned_size,
+ qlth_n_ql_size, adb,
+ src_skb) < 0)
+ return -ENOMEM;
+ nr_of_skb = 0;
+ offset = le32_to_cpu(adb->adbh->block_length);
+ /* Load pointer to next available datagram entry */
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+ }
+ /* Add buffer without head padding to next pending transfer. */
+ memcpy(adb->buf + offset + head_pad_len,
+ src_skb->data, src_skb->len);
+ /* Setup datagram entry. */
+ dg->datagram_index = cpu_to_le32(offset);
+ dg->datagram_length = cpu_to_le16(src_skb->len + head_pad_len);
+ dg->service_class = (((struct sk_buff *)src_skb)->priority);
+ dg->reserved = 0;
+ adb->dg_cnt_total++;
+ adb->payload_size += le16_to_cpu(dg->datagram_length);
+ dg++;
+ adb->dg_count[session_id]++;
+ offset += aligned_size;
+ /* Remove the processed elements and free it. */
+ spin_lock_irqsave(&ul_list->lock, flags);
+ src_skb = __skb_dequeue(ul_list);
+ spin_unlock_irqrestore(&ul_list->lock, flags);
+
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+ pkt_to_send--;
+ }
+ *out_offset = offset;
+ return nr_of_skb;
+}
+
+/* Process encode session UL data to ADB. */
+static int mux_ul_adb_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list, struct mux_adb *adb,
+ int pkt_to_send)
+{
+ int adb_updated = -EINVAL;
+ int head_pad_len, offset;
+ struct sk_buff *src_skb = NULL;
+ struct mux_adth_dg *dg;
+ u32 qlth_n_ql_size;
+
+ /* If any of the opened session has set Flow Control ON then limit the
+ * UL data to mux_flow_ctrl_high_thresh_b bytes
+ */
+ if (ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+ return adb_updated;
+ }
+
+ qlth_n_ql_size = offsetof(struct mux_qlth, ql) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_qlth_ql);
+ head_pad_len = session->ul_head_pad_len;
+
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ head_pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ IOSM_AGGR_MUX_SIG_ADBH))
+ return -ENOMEM;
+
+ offset = le32_to_cpu(adb->adbh->block_length);
+
+ if (ipc_mux->size_needed == 0)
+ ipc_mux->size_needed = offset;
+
+ /* Calculate the size needed for ADTH, QLTH and QL*/
+ if (adb->dg_count[session_id] == 0) {
+ ipc_mux->size_needed += offsetof(struct mux_adth, dg);
+ ipc_mux->size_needed += qlth_n_ql_size;
+ }
+
+ dg = adb->dg[session_id] + adb->dg_count[session_id];
+
+ if (mux_ul_dg_encode(ipc_mux, adb, dg, ul_list, src_skb,
+ session_id, pkt_to_send, qlth_n_ql_size, &offset,
+ head_pad_len) > 0) {
+ adb_updated = 1;
+ *adb->next_table_index = offset;
+ ipc_mux_ul_adb_update_ql(ipc_mux, adb, session_id,
+ qlth_n_ql_size, ul_list);
+ adb->adbh->block_length = cpu_to_le32(offset);
+ }
+
+ return adb_updated;
+}
+
bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
{
struct sk_buff_head *ul_list;
* -> try next session id.
*/
continue;
-
- updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
- ul_list, &ipc_mux->ul_adb,
- dg_n);
+ if (ipc_mux->protocol == MUX_LITE)
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
+ else
+ updated = mux_ul_adb_encode(ipc_mux, session_id,
+ session, ul_list,
+ &ipc_mux->ul_adb,
+ dg_n);
}
ipc_mux->adb_prep_ongoing = false;
return updated == 1;
}
-void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+/* Calculate the payload size of a given ADB. */
+static int ipc_mux_get_payload_from_adb(struct iosm_mux *ipc_mux,
+ struct mux_adbh *p_adbh)
{
- struct mux_adgh *adgh;
- u16 adgh_len;
+ struct mux_adth_dg *dg;
+ struct mux_adth *adth;
+ u32 payload_size = 0;
+ u32 next_table_idx;
+ int nr_of_dg, i;
+
+ /* Process the aggregated datagram tables. */
+ next_table_idx = le32_to_cpu(p_adbh->first_table_index);
+
+ if (next_table_idx < sizeof(struct mux_adbh)) {
+ dev_err(ipc_mux->dev, "unexpected empty ADB");
+ return payload_size;
+ }
- adgh = (struct mux_adgh *)skb->data;
- adgh_len = le16_to_cpu(adgh->length);
+ while (next_table_idx != 0) {
+ /* Get the reference to the table header. */
+ adth = (struct mux_adth *)((u8 *)p_adbh + next_table_idx);
- if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
- ipc_mux->ul_flow == MUX_UL)
- ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes -
- adgh_len;
+ if (adth->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADTH)) {
+ nr_of_dg = (le16_to_cpu(adth->table_length) -
+ sizeof(struct mux_adth) +
+ sizeof(struct mux_adth_dg)) /
+ sizeof(struct mux_adth_dg);
+
+ if (nr_of_dg <= 0)
+ return payload_size;
+
+ dg = &adth->dg;
+
+ for (i = 0; i < nr_of_dg; i++, dg++) {
+ if (le32_to_cpu(dg->datagram_index) <
+ sizeof(struct mux_adbh)) {
+ return payload_size;
+ }
+ payload_size +=
+ le16_to_cpu(dg->datagram_length);
+ }
+ }
+ next_table_idx = le32_to_cpu(adth->next_table_index);
+ }
+
+ return payload_size;
+}
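A note on the nr_of_dg arithmetic above: struct mux_adth embeds the first
datagram entry, so for a table carrying n entries (worked example, not driver
code):

	/* table_length = sizeof(struct mux_adth) + (n - 1) * sizeof(struct mux_adth_dg)
	 * nr_of_dg     = (table_length - sizeof(struct mux_adth)
	 *                 + sizeof(struct mux_adth_dg)) / sizeof(struct mux_adth_dg)
	 *              = n
	 */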
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ union mux_type_header hr;
+ u16 adgh_len;
+ int payload;
+
+ if (ipc_mux->protocol == MUX_LITE) {
+ hr.adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(hr.adgh->length);
+ if (hr.adgh->signature == cpu_to_le32(IOSM_AGGR_MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+ ipc_mux->ul_data_pend_bytes =
+ ipc_mux->ul_data_pend_bytes - adgh_len;
+ } else {
+ hr.adbh = (struct mux_adbh *)(skb->data);
+ payload = ipc_mux_get_payload_from_adb(ipc_mux, hr.adbh);
+ ipc_mux->ul_data_pend_bytes -= payload;
+ }
if (ipc_mux->ul_flow == MUX_UL)
dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
	/* Add session UL data to an ADB or ADGH */
ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
- if (ul_data_pend)
+ if (ul_data_pend) {
+ if (ipc_mux->protocol == MUX_AGGREGATION)
+ ipc_imem_adb_timer_start(ipc_mux->imem);
+
/* Delay the doorbell irq */
ipc_imem_td_update_timer_start(ipc_mux->imem);
-
+ }
/* reset the debounce flag */
ipc_mux->ev_mux_net_transmit_pending = false;
*/
#define MUX_QUEUE_LEVEL 1
+/* ADB finish timer value */
+#define IOSM_AGGR_MUX_ADB_FINISH_TIMEOUT_NSEC (500 * 1000)
+
+/* Enables the flow control (Flow is not allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ENABLE 5
+
+/* Disables the flow control (Flow is allowed) */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_DISABLE 6
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command
+ */
+#define IOSM_AGGR_MUX_CMD_FLOW_CTL_ACK 7
+
+/* Aggregation Protocol Command for report packet indicating link quality */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT 8
+
+/* Response to a report packet */
+#define IOSM_AGGR_MUX_CMD_LINK_STATUS_REPORT_RESP 9
+
+/* ACBH: Signature of the Aggregated Command Block Header. */
+#define IOSM_AGGR_MUX_SIG_ACBH 0x48424341
+
+/* ADTH: Signature of the Aggregated Datagram Table Header. */
+#define IOSM_AGGR_MUX_SIG_ADTH 0x48544441
+
+/* ADBH: Signature of the Aggregated Data Block Header. */
+#define IOSM_AGGR_MUX_SIG_ADBH 0x48424441
+
+/* ADGH: Signature of the Datagram Header. */
+#define IOSM_AGGR_MUX_SIG_ADGH 0x48474441
+
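The signature values above are the four ASCII tag characters packed as a
little-endian 32-bit word; a hedged illustration (the helper is hypothetical,
not part of the driver):

	#include <asm/unaligned.h>

	/* 0x48424341 read back little-endian is the byte sequence 'A' 'C' 'B' 'H' */
	static bool iosm_sig_matches(const void *hdr, u32 sig)
	{
		return get_unaligned_le32(hdr) == sig;	/* e.g. IOSM_AGGR_MUX_SIG_ACBH */
	}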
/* Size of the buffer for the IP MUX commands. */
#define MUX_MAX_UL_ACB_BUF_SIZE 256
/* MUX UL flow control higher threshold in bytes (5ms worth of data)*/
#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
+/**
+ * struct mux_cmdh - Structure of Command Header.
+ * @signature: Signature of the Command Header.
+ * @cmd_len: Length (in bytes) of the Aggregated Command Block.
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @next_cmd_index: Index (in bytes) to the next command in the buffer.
+ * @command_type:	Command enum. See the table in the Session Management
+ *			chapter for details.
+ * @transaction_id:	The Transaction ID shall be unique to the command.
+ * @param: Optional parameters used with the command.
+ */
+struct mux_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 next_cmd_index;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_acbh - Structure of the Aggregated Command Block Header.
+ * @signature: Signature of the Aggregated Command Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Command Block.
+ * @first_cmd_index: Index (in bytes) to the first command in the buffer.
+ */
+struct mux_acbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_cmd_index;
+};
+
+/**
+ * struct mux_adbh - Structure of the Aggregated Data Block Header.
+ * @signature: Signature of the Aggregated Data Block Header.
+ * @reserved: Reserved bytes. Set to zero.
+ * @sequence_nr: Block sequence number.
+ * @block_length: Length (in bytes) of the Aggregated Data Block.
+ * @first_table_index: Index (in bytes) to the first Datagram Table in
+ * the buffer.
+ */
+struct mux_adbh {
+ __le32 signature;
+ __le16 reserved;
+ __le16 sequence_nr;
+ __le32 block_length;
+ __le32 first_table_index;
+};
+
+/**
+ * struct mux_adth - Structure of the Aggregated Datagram Table Header.
+ * @signature: Signature of the Aggregated Datagram Table Header.
+ * @table_length: Length (in bytes) of the datagram table.
+ * @if_id: ID of the interface the datagrams in the table
+ * belong to.
+ * @opt_ipv4v6: Indicates IPv4(=0)/IPv6(=1) hint.
+ * @reserved: Reserved bits. Set to zero.
+ * @next_table_index: Index (in bytes) to the next Datagram Table in
+ * the buffer.
+ * @reserved2:		Reserved bytes. Set to zero.
+ * @dg:			Datagram table of variable length.
+ */
+struct mux_adth {
+ __le32 signature;
+ __le16 table_length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ __le32 next_table_index;
+ __le32 reserved2;
+ struct mux_adth_dg dg;
+};
+
/**
* struct mux_adgh - Aggregated Datagram Header.
 * @signature:	Signature of the Aggregated Datagram Header (0x48474441)
};
/**
- * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
- * depending on Header.
- * @ipc_mux: Pointer to MUX data-struct
- * @skb: Pointer to ipc_skb.
+ * union mux_type_cmdh - Command header pointers for MUX Lite and aggregation
+ * @ack_lite:	MUX Lite Command Header pointer
+ * @ack_aggr:	Aggregation Command Header pointer
*/
+union mux_type_cmdh {
+ struct mux_lite_cmdh *ack_lite;
+ struct mux_cmdh *ack_aggr;
+};
+
+/**
+ * union mux_type_header - MUX data header pointers (ADGH or ADBH)
+ * @adgh: Aggregated Datagram Header pointer
+ * @adbh: Aggregated Data Block Header pointer
+ */
+union mux_type_header {
+ struct mux_adgh *adgh;
+ struct mux_adbh *adbh;
+};
+
void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
/**
* @blocking: True for blocking send
* @respond: If true return transaction ID
*
- * Returns: 0 in success and failure value on error
+ * Returns: 0 on success, failure value on error
*/
int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
u32 transaction_id, union mux_cmd_param *param,
*/
void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+void ipc_mux_ul_adb_finish(struct iosm_mux *ipc_mux);
+
+void ipc_mux_ul_adb_update_ql(struct iosm_mux *ipc_mux, struct mux_adb *p_adb,
+ int session_id, int qlth_n_ql_size,
+ struct sk_buff_head *ul_list);
+
#endif
static const struct pci_device_id iosm_ipc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7360_ID) },
{}
};
MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
/* Device ID */
#define INTEL_CP_DEVICE_7560_ID 0x7560
+#define INTEL_CP_DEVICE_7360_ID 0x7360
/* Define for BAR area usage */
#define IPC_DOORBELL_BAR0 0
return wwandev->debugfs_dir;
}
EXPORT_SYMBOL_GPL(wwan_get_debugfs_dir);
+
+static int wwan_dev_debugfs_match(struct device *dev, const void *dir)
+{
+ struct wwan_device *wwandev;
+
+ if (dev->type != &wwan_dev_type)
+ return 0;
+
+ wwandev = to_wwan_dev(dev);
+
+ return wwandev->debugfs_dir == dir;
+}
+
+static struct wwan_device *wwan_dev_get_by_debugfs(struct dentry *dir)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, dir, wwan_dev_debugfs_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
+void wwan_put_debugfs_dir(struct dentry *dir)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_debugfs(dir);
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ /* wwan_dev_get_by_debugfs() also got a reference */
+ put_device(&wwandev->dev);
+ put_device(&wwandev->dev);
+}
+EXPORT_SYMBOL_GPL(wwan_put_debugfs_dir);
#endif
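A hedged usage sketch of the new get/put pairing (the driver-side names are
hypothetical):

	/* wwan_get_debugfs_dir() takes a reference on the WWAN device ... */
	struct dentry *dir = wwan_get_debugfs_dir(parent_dev);

	if (!IS_ERR(dir))
		my_dir = debugfs_create_dir("my_driver", dir);

	/* ... and wwan_put_debugfs_dir() drops it along with the lookup's own */
	debugfs_remove_recursive(my_dir);
	wwan_put_debugfs_dir(dir);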
/* This function allocates and registers a new WWAN device OR if a WWAN device
void nvme_complete_batch_req(struct request *req)
{
+ trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
nvme_end_req_zoned(req);
}
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
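A minimal sketch of the teardown ordering that comment asks of transports; it
mirrors the nvme-rdma and nvme-tcp error-handler hunks below:

	nvme_stop_keep_alive(ctrl);		/* controller has left LIVE */
	flush_work(&ctrl->async_event_work);	/* in-flight AER submission completes */
	/* ... only then tear down the I/O and admin queues ... */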
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
ioc->usg_calls++;
#endif
- while(sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
ccio_unmap_page(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
- while (sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
const struct j721e_pcie_data *data;
struct cdns_pcie *cdns_pcie;
struct j721e_pcie *pcie;
- struct cdns_pcie_rc *rc;
- struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc = NULL;
+ struct cdns_pcie_ep *ep = NULL;
struct gpio_desc *gpiod;
void __iomem *base;
struct clk *clk;
if (!pcie)
return -ENOMEM;
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return 0;
+ }
+
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
- if (!bridge) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
-
- if (!data->byte_access_allowed)
- bridge->ops = &cdns_ti_pcie_host_ops;
- rc = pci_host_bridge_priv(bridge);
- rc->quirk_retrain_flag = data->quirk_retrain_flag;
- rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &rc->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
- ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &ep->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
dev_err(dev, "Failed to init phy\n");
goto err_pcie_setup;
break;
- default:
- dev_err(dev, "INVALID device type %d\n", mode);
}
return 0;
return 0;
}
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
static const struct of_device_id kirin_pcie_match[] = {
- {
- .compatible = "hisilicon,kirin960-pcie",
- .data = (void *)PCIE_KIRIN_INTERNAL_PHY
- },
- {
- .compatible = "hisilicon,kirin970-pcie",
- .data = (void *)PCIE_KIRIN_EXTERNAL_PHY
- },
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
{},
};
static int kirin_pcie_probe(struct platform_device *pdev)
{
- enum pcie_kirin_phy_type phy_type;
struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
int ret;
return -EINVAL;
}
- phy_type = (long)of_device_get_match_data(dev);
- if (!phy_type) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
-
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
return -ENOMEM;
pci->ops = &kirin_dw_pcie_ops;
pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
- kirin_pcie->type = phy_type;
+ kirin_pcie->type = data->phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
if (!hv_dev)
continue;
- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
put_pcichild(hv_dev);
}
if (!desc)
return cpu_possible_mask;
- if (WARN_ON_ONCE(!desc->affinity))
+	/* MSI[X] interrupts can be allocated without an affinity descriptor */
+ if (!desc->affinity)
return NULL;
/*
{
int ret, i;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = -1;
+
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- int status, capabilities, irq_services, i, nr_service;
- int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
- [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
- };
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
/* Enable PCI Express port device */
status = pci_enable_device(dev);
return 0;
pci_set_master(dev);
-
- irq_services = 0;
- if (IS_ENABLED(CONFIG_PCIE_PME))
- irq_services |= PCIE_PORT_SERVICE_PME;
- if (IS_ENABLED(CONFIG_PCIEAER))
- irq_services |= PCIE_PORT_SERVICE_AER;
- if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
- irq_services |= PCIE_PORT_SERVICE_HP;
- if (IS_ENABLED(CONFIG_PCIE_DPC))
- irq_services |= PCIE_PORT_SERVICE_DPC;
- irq_services &= capabilities;
-
- if (irq_services) {
- /*
- * Initialize service IRQs. Don't use service devices that
- * require interrupts if there is no way to generate them.
- * However, some drivers may have a polling mode (e.g.
- * pciehp_poll_mode) that can be used in the absence of IRQs.
- * Allow them to determine if that is to be used.
- */
- status = pcie_init_service_irqs(dev, irqs, irq_services);
- if (status) {
- irq_services &= PCIE_PORT_SERVICE_HP;
- if (!irq_services)
- goto error_disable;
- }
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+ status = pcie_init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+ goto error_disable;
}
/* Allocate child services if any */
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
(DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
(DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
- DIV_ROUND_UP(priv->config.clk_pre, temp));
+ DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE));
regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
DIV_ROUND_UP(priv->config.hs_exit, temp) |
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908
- default ARCH_BRCMSTB
+ default ARCH_BCM4908 || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/suspend.h>
#include "phy-brcm-usb-init.h"
int init_count;
int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+ struct notifier_block pm_notifier;
+ bool pm_active;
};
static s8 *node_reg_names[BRCM_REGS_MAX] = {
"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
};
+static int brcm_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct brcm_usb_phy_data *priv =
+ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ priv->pm_active = true;
+ break;
+ case PM_POST_RESTORE:
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ priv->pm_active = false;
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
{
struct phy *gphy = dev_id;
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
/*
* Use a lock to make sure a second caller waits until
* the base phy is inited before using it.
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
dev_dbg(&gphy->dev, "EXIT\n");
if (phy->id == BRCM_USB_PHY_2_0)
brcm_usb_uninit_eohci(&priv->ini);
if (err)
return err;
+ priv->pm_notifier.notifier_call = brcm_pm_notifier;
+ register_pm_notifier(&priv->pm_notifier);
+
mutex_init(&priv->mutex);
/* make sure invert settings are correct */
static int brcm_usb_phy_remove(struct platform_device *pdev)
{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+ unregister_pm_notifier(&priv->pm_notifier);
return 0;
}
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
+ dev_dbg(dev, "SUSPEND\n");
priv->ini.wake_enabled = device_may_wakeup(dev);
if (priv->phys[BRCM_USB_PHY_3_0].inited)
brcm_usb_uninit_xhci(&priv->ini);
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ dev_dbg(dev, "RESUME\n");
if (priv->wake_irq >= 0)
disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
struct device *dev = &pdev->dev;
const struct cdns_sierra_data *data;
unsigned int id_value;
- int i, ret, node = 0;
+ int ret, node = 0;
void __iomem *base;
struct device_node *dn = dev->of_node, *child;
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- goto put_child2;
+ of_node_put(child);
+ goto put_control;
}
if (!sp->autoconf) {
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
}
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
- goto put_child2;
+ goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
if (!sp->autoconf && sp->nsubnodes > 1) {
ret = cdns_sierra_phy_configure_multilink(sp);
if (ret)
- goto put_child2;
+ goto put_control;
}
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
- node++;
-put_child2:
- for (i = 0; i < node; i++)
- reset_control_put(sp->phys[i].lnk_rst);
- of_node_put(child);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
clk_disable:
cdns_sierra_phy_disable_clocks(sp);
reset_control_assert(sp->apb_rst);
/* no efuse, ignore it */
if (!instance->efuse_intr &&
!instance->efuse_rx_imp &&
- !instance->efuse_rx_imp) {
+ !instance->efuse_tx_imp) {
dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
instance->efuse_sw_en = 0;
break;
cfg->clk_miss = 0;
cfg->clk_post = 60000 + 52 * ui;
- cfg->clk_pre = 8000;
+ cfg->clk_pre = 8;
cfg->clk_prepare = 38000;
cfg->clk_settle = 95000;
cfg->clk_term_en = 0;
if (cfg->clk_post < (60000 + 52 * ui))
return -EINVAL;
- if (cfg->clk_pre < 8000)
+ if (cfg->clk_pre < 8)
return -EINVAL;
if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000)
* Author: Wyon Bi <bivvy.bi@rock-chips.com>
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
* The value of counter for HS Tlpx Time
ret = __stm32_usbphyc_pll_disable(usbphyc);
if (ret)
- return ret;
+ goto dec_n_pll_cons;
}
ret = stm32_usbphyc_regulators_enable(usbphyc);
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
{ .val = 3, .div = 8, },
+ { /* sentinel */ },
};
static const struct wiz_clk_div_sel clk_div_sel[] = {
#define PROT_BUS_WIDTH_10 0x0
#define PROT_BUS_WIDTH_20 0x1
#define PROT_BUS_WIDTH_40 0x2
-#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2)
+#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2)
/* Number of GT lanes */
#define NUM_LANES 4
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
+ u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
/* Set SGMII protocol TX and RX bus width to 10 bits. */
- xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
- xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
+ xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}
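The switch from xpsgtr_write() to xpsgtr_clr_set() matters because the
bus-width registers hold one 2-bit field per lane; a worked example of the
macro math (illustration only):

	u32 mask = PROT_BUS_WIDTH_MASK(1);			  /* GENMASK(3, 2) == 0x0c */
	u32 val  = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(1); /* 0x0 << 2 */

	/* The read-modify-write touches only bits 3:2; the old full-register
	 * write clobbered the other lanes' bus-width settings.
	 */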
static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const struct regulator_ops *ops = rdev->desc->ops;
struct regulation_constraints *c = rdev->constraints;
- int enabled, ret;
+ int ret;
if (c && c->always_on)
return 0;
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's always on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- /* But if reading the status failed, assume that it's off. */
- if (enabled <= 0)
+ /* If reading the status failed, assume that it's off. */
+ if (_regulator_is_enabled(rdev) <= 0)
goto unlock;
if (have_full_constraints()) {
else
path_event[chpid] = PE_NONE;
}
- if (cdev)
+ if (cdev && cdev->drv && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
break;
}
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ put_cpu();
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ crc_err = (stats->InvalidCRCCount++);
+ put_cpu();
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
struct hisi_sas_slot *slot,
struct hisi_sas_dq *dq,
struct hisi_sas_device *sas_dev,
- struct hisi_sas_internal_abort *abort,
- struct hisi_sas_tmf_task *tmf)
+ struct hisi_sas_internal_abort *abort)
{
struct hisi_sas_cmd_hdr *cmd_hdr_base;
int dlvry_queue_slot, dlvry_queue;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->tmf = tmf;
- slot->is_internal = tmf;
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
slot->is_internal = tmf;
/* protect task_prep and start_delivery sequence */
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
return 0;
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
+ struct hisi_sas_tmf_task tmf = {};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
slot->port = port;
slot->is_internal = true;
- hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
return 0;
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/*See the comments for mpi_ssp_completion */
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
+
+ if (ccb)
+ ccb->task = NULL;
goto ex_err;
}
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
return;
}
break;
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
- }
}
/*See the comments for mpi_ssp_completion */
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ /*
+	 * Reallocate if a new shift is calculated, which happens when a
+	 * new default queue depth is set up after calling ->slave_configure.
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+	 * The request queue has to be frozen while the budget map is
+	 * reallocated; the disk isn't added yet, so freezing is fast.
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (sbitmap_init_node(&sdev->budget_map,
- scsi_device_max_queue_depth(sdev),
- sbitmap_calculate_shift(depth),
- GFP_KERNEL, sdev->request_queue->node,
- false, true)) {
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up budget map again since memory consumption of
+ * the map depends on actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
}
lpc_ctrl->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_ctrl->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(lpc_ctrl->clk);
- }
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
rc = clk_prepare_enable(lpc_ctrl->clk);
if (rc) {
dev_err(dev, "couldn't enable clock\n");
help
Enable support for USI block. USI (Universal Serial Interface) is an
IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
- ExynosAutoV0. USI block can be configured to provide one of the
+ ExynosAutoV9. USI block can be configured to provide one of the
following serial protocols: UART, SPI or High Speed I2C.
This driver allows one to configure USI for desired protocol, which
ret = spi_register_driver(&fbtft_driver_spi_driver); \
if (ret < 0) \
return ret; \
- return platform_driver_register(&fbtft_driver_platform_driver); \
+ ret = platform_driver_register(&fbtft_driver_platform_driver); \
+ if (ret < 0) \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ return ret; \
} \
\
static void __exit fbtft_driver_module_exit(void) \
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (WARN_ON(!service))
+ if (WARN_ON(!service)) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
user_service = (struct user_service *)service->base.userdata;
instance = user_service->instance;
- if (!instance || instance->closing)
+ if (!instance || instance->closing) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
+
+ /*
+	 * Since we hop between different synchronization mechanisms,
+	 * taking an extra reference keeps the implementation simpler.
+ */
+ vchiq_service_get(service);
+ rcu_read_unlock();
vchiq_log_trace(vchiq_arm_log_level,
"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
bulk_userdata);
if (status != VCHIQ_SUCCESS) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return status;
}
}
if (wait_for_completion_interruptible(&user_service->remove_event)) {
vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_RETRY;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
header = NULL;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
if (skip_completion)
return VCHIQ_SUCCESS;
optee_unregister_devices();
optee_notif_uninit(optee);
+ teedev_close_context(optee->ctx);
/*
* The two devices have to be unregistered before we can free the
* other resources.
*/
static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_shm *shm;
shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
}
static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
- struct optee *optee = tee_get_drvdata(ctx->teedev);
-
arg->ret_origin = TEEC_ORIGIN_COMMS;
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
- handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
}
}
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
- struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
{
switch (cmd) {
case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
- handle_ffa_rpc_func_cmd(ctx, arg);
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
break;
case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
/* Interrupt delivered by now */
* above.
*/
cond_resched();
- optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
data->data0 = cmd;
data->data1 = 0;
.data2 = (u32)(shm->sec_world_id >> 32),
.data3 = shm->offset,
};
- struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
- unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
- struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
return optee_ffa_yielding_call(ctx, &data, rpc_arg);
}
{
const struct ffa_dev_ops *ffa_ops;
unsigned int rpc_arg_count;
+ struct tee_shm_pool *pool;
struct tee_device *teedev;
+ struct tee_context *ctx;
struct optee *optee;
int rc;
if (!optee)
return -ENOMEM;
- optee->pool = optee_ffa_config_dyn_shm();
- if (IS_ERR(optee->pool)) {
- rc = PTR_ERR(optee->pool);
- optee->pool = NULL;
- goto err;
+ pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
}
+ optee->pool = pool;
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_pool;
}
optee->teedev = teedev;
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
+ ctx = teedev_open(optee->teedev);
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		goto err_rhashtable_free;
+	}
+ optee->ctx = ctx;
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_close_ctx;
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_unregister_devices;
pr_info("initialized driver\n");
return 0;
-err:
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
tee_device_unregister(optee->teedev);
- if (optee->pool)
- tee_shm_pool_free(optee->pool);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
kfree(optee);
return rc;
}
void optee_notif_uninit(struct optee *optee)
{
- kfree(optee->notif.bitmap);
+ bitmap_free(optee->notif.bitmap);
}
struct optee_notif {
u_int max_key;
- struct tee_context *ctx;
/* Serializes access to the elements below in this struct */
spinlock_t lock;
struct list_head db;
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @teedev: client device
* @ops: internal callbacks for different ways to reach secure
* world
- * @teedev: client device
+ * @ctx: driver internal TEE context
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
struct tee_device *supp_teedev;
struct tee_device *teedev;
const struct optee_ops *ops;
+ struct tee_context *ctx;
union {
struct optee_smc smc;
struct optee_ffa ffa;
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
-
return 0;
}
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(optee->ctx, sz,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
+ shm = tee_shm_alloc(optee->ctx, param->a1,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(¶m->a1, ¶m->a2, pa);
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->notif.ctx);
+ optee_smc_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
- struct tee_context *ctx;
int rc;
- ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- optee->notif.ctx = ctx;
rc = request_threaded_irq(irq, notif_irq_handler,
notif_irq_thread_fn,
0, "optee_notification", optee);
if (rc)
- goto err_close_ctx;
+ return rc;
optee->smc.notif_irq = irq;
return 0;
-
-err_close_ctx:
- teedev_close_context(optee->notif.ctx);
- optee->notif.ctx = NULL;
-
- return rc;
}
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
- if (optee->notif.ctx) {
- optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
free_irq(optee->smc.notif_irq, optee);
irq_dispose_mapping(optee->smc.notif_irq);
}
-
- /*
- * The thread normally working with optee->notif.ctx was
- * stopped with free_irq() above.
- *
- * Note we're not using teedev_close_context() or
- * tee_client_close_context() since we have already called
- * tee_device_put() while initializing to avoid a circular
- * reference counting.
- */
- teedev_close_context(optee->notif.ctx);
}
}
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ struct tee_context *ctx;
u32 max_notif_value;
u32 sec_caps;
int rc;
optee->pool = pool;
platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+	if (IS_ERR(ctx)) {
+		rc = PTR_ERR(ctx);
+		goto err_supp_uninit;
+	}
+ optee->ctx = ctx;
rc = optee_notif_init(optee, max_notif_value);
if (rc)
- goto err_supp_uninit;
+ goto err_close_ctx;
if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
unsigned int irq;
optee_unregister_devices();
err_notif_uninit:
optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
err_supp_uninit:
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
return;
}
}
if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
}
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
- n = min(*nr + 1, canon_head - ldata->read_tail);
+ n = min(*nr, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
n += N_TTY_BUF_SIZE;
c = n + found;
- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
- c = min(*nr, c);
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- }
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
uart.port.private_data = pericom;
uart.port.iotype = UPIO_PORT;
uart.port.uartclk = 921600 * 16;
- uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ | UPF_MAGIC_MULTIPLIER;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
uart.port.set_divisor = pericom_do_set_divisor;
for (i = 0; i < nr && i < maxnr; i++) {
unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
return -ENXIO;
- vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1);
vsa.console--;
+ vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(vsa.console);
if (ret) {
return -ENXIO;
arg--;
+ arg = array_index_nospec(arg, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(arg);
console_unlock();
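Both hunks apply the same sanitizing pattern: bounds-check the user-supplied
value, decrement, then clamp with the real array size so speculation cannot
index past the console array (generic sketch):

	if (idx == 0 || idx > MAX_NR_CONSOLES)
		return -ENXIO;
	idx--;
	idx = array_index_nospec(idx, MAX_NR_CONSOLES);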
static void ulpi_dev_release(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(to_ulpi_dev(dev));
}
return ret;
ret = ulpi_read_id(ulpi);
- if (ret)
+ if (ret) {
+ of_node_put(ulpi->dev.of_node);
return ret;
+ }
ret = device_register(&ulpi->dev);
- if (ret)
+ if (ret) {
+ put_device(&ulpi->dev);
return ret;
+ }
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- of_node_put(ulpi->dev.of_node);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
return retval;
}
- find_and_link_peer(hub, port1);
-
retval = component_add(&port_dev->dev, &connector_ops);
- if (retval)
+ if (retval) {
dev_warn(&port_dev->dev, "failed to add component\n");
+ device_unregister(&port_dev->dev);
+ return retval;
+ }
+
+ find_and_link_peer(hub, port1);
/*
	 * Enable runtime PM and hold a reference that hub_configure()
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct phy *usb3_phy;
- int ret;
+ int ret = 0;
u32 reg;
usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+ /*
+	 * As per the data book, section 4.2.3.2 "TRB Control Bit Rules":
+ *
+ * The controller autonomously checks the HWO field of a TRB to determine if the
+ * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+ * is valid before setting the HWO field to '1'. In most systems, this means that
+ * software must update the fourth DWORD of a TRB last.
+ *
+	 * However, there is a possibility of CPU re-ordering here which can
+	 * cause the controller to observe the HWO bit set prematurely.
+ * Add a write memory barrier to prevent CPU re-ordering.
+ */
+ wmb();
trb->ctrl |= DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_enq(dep);
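The barrier is an instance of the generic "fill, then publish" pattern
(hedged sketch; the field values are illustrative):

	trb->bpl  = lower_32_bits(dma);
	trb->bph  = upper_32_bits(dma);
	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	wmb();				/* all fields visible before HWO */
	trb->ctrl |= DWC3_TRB_CTRL_HWO;	/* controller may now consume the TRB */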
if (w_index != 0x5 || (w_value >> 8))
break;
interface = w_value & 0xFF;
+ if (interface >= MAX_CONFIG_INTERFACES ||
+ !os_desc_cfg->interface[interface])
+ break;
buf[6] = w_index;
count = count_ext_prop(os_desc_cfg,
interface);
static void ffs_data_closed(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles,
- ffs->eps_count);
- ffs->epfiles = NULL;
- }
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock,
+ flags);
+
+ if (epfiles)
+ ffs_epfiles_destroy(epfiles,
+ ffs->eps_count);
+
if (ffs->setup_state == FFS_SETUP_PENDING)
__ffs_ep0_stall(ffs);
} else {
static void ffs_data_clear(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock, flags);
+
+ /*
+	 * A race is possible between ffs_func_eps_disable() and
+	 * ffs_epfile_release(); keeping a local copy of epfiles
+	 * saves us from a use-after-free.
+ */
+ if (epfiles) {
+ ffs_epfiles_destroy(epfiles, ffs->eps_count);
ffs->epfiles = NULL;
}
static void ffs_func_eps_disable(struct ffs_function *func)
{
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = func->ffs->epfiles;
- unsigned count = func->ffs->eps_count;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ count = func->ffs->eps_count;
+ epfile = func->ffs->epfiles;
+ ep = func->eps;
while (count--) {
/* pending requests get nuked */
if (ep->ep)
static int ffs_func_eps_enable(struct ffs_function *func)
{
- struct ffs_data *ffs = func->ffs;
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = ffs->epfiles;
- unsigned count = ffs->eps_count;
+ struct ffs_data *ffs;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ ffs = func->ffs;
+ ep = func->eps;
+ epfile = ffs->epfiles;
+ count = ffs->eps_count;
while(count--) {
ep->ep->driver_data = ep;
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
+ BufLength = le32_to_cpu(buf->InformationBufferLength);
+ BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+ if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+ return -EINVAL;
+
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
- BufLength = le32_to_cpu(buf->InformationBufferLength);
- BufOffset = le32_to_cpu(buf->InformationBufferOffset);
-
#ifdef VERBOSE_DEBUG
pr_debug("%s: Length: %d\n", __func__, BufLength);
pr_debug("%s: Offset: %d\n", __func__, BufOffset);
ret = -EBUSY;
goto out_unlock;
}
- if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+ if (in != usb_endpoint_dir_in(ep->ep->desc)) {
dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
ret = -EINVAL;
goto out_unlock;
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
+ if (cur_role == USB_ROLE_HOST)
+ device_release_driver(host);
if (usb3->driver)
usb3_disconnect(usb3);
usb3_vbus_out(usb3, false);
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
+ if (of_property_read_u8(np, "boost-up", &hub->boost_up))
+ hub->boost_up = USB251XB_DEF_BOOST_UP;
+
cproperty_char = of_get_property(np, "manufacturer", NULL);
strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
* may be as soon as needed.
*/
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
- hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x2184, 0x0057) },
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x9986, 0x7523) },
{ },
static void cp210x_disable_event_mode(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
+ { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */
+#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */
+#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */
#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ bool "Enable legacy fbcon hardware acceleration code"
+ depends on FRAMEBUFFER_CONSOLE
+ default y if PARISC
+ default n
+ help
+ This option enables the fbcon (framebuffer text-based) hardware
+ acceleration for graphics drivers which were written for the fbdev
+ graphics interface.
+
+	  On modern machines (like x86-64) or when using a modern Linux
+	  distribution, those fbdev drivers usually aren't used, so enabling
+	  this option has no effect; on such machines you want to leave it
+	  disabled.
+
+ If you compile this kernel for older machines which still require the
+ fbdev drivers, you may want to say Y.
+
+	  If unsure, say N.
+
config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
bool "Map the console to the primary display device"
depends on FRAMEBUFFER_CONSOLE
}
}
+static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_copyarea area;
+
+ area.sx = sx * vc->vc_font.width;
+ area.sy = sy * vc->vc_font.height;
+ area.dx = dx * vc->vc_font.width;
+ area.dy = dy * vc->vc_font.height;
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
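
All of the bmove variants added in this series do the same unit conversion:
character cells in, pixels out. For the unrotated variant above, a
hypothetical 8x16 font gives:

	/* bit_bmove(vc, info, sy = 2, sx = 4, dy = 0, dx = 4,
	 *           height = 3, width = 10)
	 * with vc_font.width = 8 and vc_font.height = 16 yields:
	 *   area = { .sx = 32, .sy = 32, .dx = 32, .dy = 0,
	 *            .width = 80, .height = 48 }
	 * which fb_copyarea() blits in a single operation.
	 */
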
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
void fbcon_set_bitops(struct fbcon_ops *ops)
{
+ ops->bmove = bit_bmove;
ops->clear = bit_clear;
ops->putcs = bit_putcs;
ops->clear_margins = bit_clear_margins;
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
+static __inline__ void ywrap_up(struct vc_data *vc, int count);
+static __inline__ void ywrap_down(struct vc_data *vc, int count);
+static __inline__ void ypan_up(struct vc_data *vc, int count);
+static __inline__ void ypan_down(struct vc_data *vc, int count);
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
ops->graphics = 0;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+ !(info->flags & FBINFO_HWACCEL_DISABLED))
+ p->scrollmode = SCROLL_MOVE;
+ else /* default to something safe */
+ p->scrollmode = SCROLL_REDRAW;
+#endif
+
/*
* ++guenther: console.c:vc_allocate() relies on initializing
* vc_{cols,rows}, but we must not set those if we are only
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
+ * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
* anywhere on this display. These functions convert lineoffsets to
* bitmap offsets and deal with the wrap-around case by splitting blits.
*
+ * fbcon_bmove_physical_8() -- These functions are fast implementations
* fbcon_clear_physical_8() -- of original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later
*
}
}
+static __inline__ void ywrap_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+ if (p->yscroll >= p->vrows) /* Deal with wrap */
+ p->yscroll -= p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ywrap_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) /* Deal with wrap */
+ p->yscroll += p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll += count;
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ ops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll -= p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ p->yscroll -= p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) {
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll += p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+
+ if (p->yscroll < 0) {
+ p->yscroll += p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
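
The yscroll arithmetic in these helpers is easier to follow with concrete
numbers (hypothetical values, not from any particular driver):

	/* ypan_up(vc, 2) with p->yscroll == 9, p->vrows == 60 and
	 * vc->vc_rows == 50:
	 *   p->yscroll += 2  ->  11, which exceeds vrows - vc_rows = 10, so
	 *   the visible 50 rows are bmove'd back to the top of the buffer
	 *   and p->yscroll -= 10  ->  1;
	 * the display is then panned to yoffset = 1 * vc_font.height.
	 */
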
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+{
+ unsigned short *s = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+ unsigned short attr = 1;
+
+ do {
+ c = scr_readw(s);
+ if (attr != (c & 0xff00)) {
+ attr = c & 0xff00;
+ if (s > start) {
+ fbcon_putcs(vc, start, s - start,
+ dy, x);
+ x += s - start;
+ start = s;
+ }
+ }
+ console_conditional_schedule();
+ s++;
+ } while (s < le);
+ if (s > start)
+ fbcon_putcs(vc, start, s - start, dy, x);
+ console_conditional_schedule();
+ dy++;
+ }
+}
+
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct fbcon_display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: We subtract two lines from these pointers */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_display *p = &fb_display[vc->vc_num];
+ int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
return true;
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, t, b - t - count,
- count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ywrap_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_redraw_move(vc, p, 0, t, count);
+ ypan_up_redraw(vc, t, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b,
+ vc->vc_rows - b, b);
+ } else
+ fbcon_redraw_move(vc, p, t + count, b - t - count, t);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ypan_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_up:
+ fbcon_redraw(vc, p, t, b - t - count,
+ count * vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
+ break;
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, b - 1, b - t - count,
- -count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_down;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ywrap_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ypan_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
+ b - count);
+ ypan_down_redraw(vc, t, count);
+ if (t > 0)
+ fbcon_redraw_move(vc, p, count, t, 0);
+ } else
+ fbcon_redraw_move(vc, p, t, b - t - count, t + count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_down:
+ fbcon_redraw(vc, p, b - 1, b - t - count,
+ -count * vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
}
return false;
}
+
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+	/* Split blits that cross the physical y_wrap boundary.
+	 * The pathological case involves 4 blits; recursion is cleaner
+	 * here than an unrolled version.
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec()
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
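
A concrete case makes the recursion clearer. With y_break = 20, moving a
10-row block that starts at sy = 15 crosses the wrap boundary, so it is
split (a worked sketch):

	/* fbcon_bmove_rec(vc, p, 15, 0, 40, 0, 10, cols, 20):
	 *   sy = 15 < 20 and sy + 10 = 25 > 20, so b = 20 - 15 = 5;
	 *   dy (40) >= sy (15), so the upper half moves first:
	 *     fbcon_bmove_rec(..., sy = 20, dy = 45, height = 5, ...)
	 *     fbcon_bmove_rec(..., sy = 15, dy = 40, height = 5, ...)
	 *   Each half now lies entirely on one side of the wrap and is
	 *   blitted directly via ops->bmove() at real_y() coordinates.
	 */
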
+static void updatescrollmode_accel(struct fbcon_display *p,
+ struct fb_info *info,
+ struct vc_data *vc)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ struct fbcon_ops *ops = info->fbcon_par;
+ int cap = info->flags;
+ u16 t = 0;
+ int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
+ info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
+ info->var.xres_virtual);
+ int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
+ divides(ypan, vc->vc_font.height) && vyres > yres;
+ int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
+ divides(ywrap, vc->vc_font.height) &&
+ divides(vc->vc_font.height, vyres) &&
+ divides(vc->vc_font.height, yres);
+ int reading_fast = cap & FBINFO_READS_FAST;
+ int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+ int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+
+ if (good_wrap || good_pan) {
+ if (reading_fast || fast_copyarea)
+ p->scrollmode = good_wrap ?
+ SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
+ else
+ p->scrollmode = good_wrap ? SCROLL_REDRAW :
+ SCROLL_PAN_REDRAW;
+ } else {
+ if (reading_fast || (fast_copyarea && !fast_imageblit))
+ p->scrollmode = SCROLL_MOVE;
+ else
+ p->scrollmode = SCROLL_REDRAW;
+ }
+#endif
+}
+
static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
if ((yres % fh) && (vyres % fh < yres % fh))
p->vrows--;
+
+ /* update scrollmode in case hardware acceleration is used */
+ updatescrollmode_accel(p, info, vc);
}
#define PITCH(w) (((w) + 7) >> 3)
updatescrollmode(p, info, vc);
- scrollback_phys_max = 0;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_WRAP_MOVE:
+ scrollback_phys_max = p->vrows - vc->vc_rows;
+ break;
+ case SCROLL_PAN_MOVE:
+ case SCROLL_PAN_REDRAW:
+ scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
+ if (scrollback_phys_max < 0)
+ scrollback_phys_max = 0;
+ break;
+ default:
+ scrollback_phys_max = 0;
+ break;
+ }
+
scrollback_max = 0;
scrollback_current = 0;
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ u_short scrollmode; /* Scroll Method, use fb_scrollmode() */
+#endif
u_short inverse; /* != 0 text black on white as default */
short yscroll; /* Hardware scrolling */
int vrows; /* number of virtual rows */
};
struct fbcon_ops {
+ void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
+ /*
+ * Scroll Method
+ */
+
+/* There are several methods fbcon can use to move text around the screen:
+ *
+ * Operation Pan Wrap
+ *---------------------------------------------
+ * SCROLL_MOVE copyarea No No
+ * SCROLL_PAN_MOVE copyarea Yes No
+ * SCROLL_WRAP_MOVE copyarea No Yes
+ * SCROLL_REDRAW imageblit No No
+ * SCROLL_PAN_REDRAW imageblit Yes No
+ * SCROLL_WRAP_REDRAW imageblit No Yes
+ *
+ * (SCROLL_WRAP_REDRAW is not implemented yet)
+ *
+ * In general, fbcon will choose the best scrolling
+ * method based on the rule below:
+ *
+ * Pan/Wrap > accel imageblit > accel copyarea >
+ * soft imageblit > (soft copyarea)
+ *
+ * Exception to the rule: Pan + accel copyarea is
+ * preferred over Pan + accel imageblit.
+ *
+ * The above is typical for PCI/AGP cards. Unless
+ * overridden, fbcon will never use soft copyarea.
+ *
+ * If you need to override the above rule, set the
+ * appropriate flags in fb_info->flags. For example,
+ * to prefer copyarea over imageblit, set
+ * FBINFO_READS_FAST.
+ *
+ * Other notes:
+ * + use the hardware engine to move the text
+ * (hw-accelerated copyarea() and fillrect())
+ * + use hardware-supported panning on a large virtual screen
+ * + amifb can not only pan, but also wrap the display by N lines
+ * (i.e. visible line i = physical line (i+N) % yres).
+ * + read what's already rendered on the screen and
+ * write it in a different place (this is cfb_copyarea())
+ * + re-render the text to the screen
+ *
+ * Whether to use wrapping or panning can only be figured out at
+ * runtime (when we know whether our font height is a multiple
+ * of the pan/wrap step)
+ *
+ */
+
+#define SCROLL_MOVE 0x001
+#define SCROLL_PAN_MOVE 0x002
+#define SCROLL_WRAP_MOVE 0x003
+#define SCROLL_REDRAW 0x004
+#define SCROLL_PAN_REDRAW 0x005
+
+static inline u_short fb_scrollmode(struct fbcon_display *fb)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ return fb->scrollmode;
+#else
+ /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
+ return SCROLL_REDRAW;
+#endif
+}
+
+
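
A worked example of the selection rule implemented by
updatescrollmode_accel() above (capabilities hypothetical):

	/* cap = FBINFO_HWACCEL_YPAN | FBINFO_HWACCEL_IMAGEBLIT,
	 * font height divides ypanstep, yres_virtual > yres:
	 *   good_pan = 1, good_wrap = 0,
	 *   reading_fast = 0, fast_copyarea = 0
	 *     -> scrollmode = SCROLL_PAN_REDRAW
	 *
	 * the same card with FBINFO_READS_FAST also set:
	 *     -> scrollmode = SCROLL_PAN_MOVE
	 */
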
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
}
}
+
+static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+
+ area.sx = sy * vc->vc_font.height;
+ area.sy = vyres - ((sx + width) * vc->vc_font.width);
+ area.dx = dy * vc->vc_font.height;
+ area.dy = vyres - ((dx + width) * vc->vc_font.width);
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
{
struct fbcon_ops *ops = info->fbcon_par;
u32 yoffset;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
int err;
yoffset = (vyres - info->var.yres) - ops->var.xoffset;
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
+ ops->bmove = ccw_bmove;
ops->clear = ccw_clear;
ops->putcs = ccw_putcs;
ops->clear_margins = ccw_clear_margins;
}
}
+
+static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sx = vxres - ((sy + height) * vc->vc_font.height);
+ area.sy = sx * vc->vc_font.width;
+ area.dx = vxres - ((dy + height) * vc->vc_font.height);
+ area.dy = dx * vc->vc_font.width;
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
static int cw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
u32 xoffset;
int err;
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
+ ops->bmove = cw_bmove;
ops->clear = cw_clear;
ops->putcs = cw_putcs;
ops->clear_margins = cw_clear_margins;
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
+#define GETVYRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \
+ (i)->var.yres : (i)->var.yres_virtual; })
+
+#define GETVXRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
+ (i)->var.xres : (i)->var.xres_virtual; })
+
+
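
The point of these macros: rotated blitting must address the full virtual
framebuffer whenever a panning or wrapping scroll mode is active, but only
the visible resolution otherwise. With hypothetical mode values:

	/* info->var.yres = 768, info->var.yres_virtual = 1536:
	 *   fb_scrollmode(p) == SCROLL_REDRAW   -> GETVYRES() == 768
	 *   fb_scrollmode(p) == SCROLL_PAN_MOVE -> GETVYRES() == 1536
	 */
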
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
}
}
+
+static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sy = vyres - ((sy + height) * vc->vc_font.height);
+ area.sx = vxres - ((sx + width) * vc->vc_font.width);
+ area.dy = vyres - ((dy + height) * vc->vc_font.height);
+ area.dx = vxres - ((dx + width) * vc->vc_font.width);
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
{
struct fbcon_ops *ops = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
int err;
xoffset = vxres - info->var.xres - ops->var.xoffset;
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
+ ops->bmove = ud_bmove;
ops->clear = ud_clear;
ops->putcs = ud_putcs;
ops->clear_margins = ud_clear_margins;
ret = fbcon_set_con2fb_map_ioctl(argp);
break;
case FBIOBLANK:
+ if (arg > FB_BLANK_POWERDOWN)
+ return -EINVAL;
console_lock();
lock_fb_info(info);
ret = fb_blank(info, arg);
#include <asm/types.h>
#include "fbcon.h"
+static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_tilearea area;
+
+ area.sx = sx;
+ area.sy = sy;
+ area.dx = dx;
+ area.dy = dy;
+ area.height = height;
+ area.width = width;
+
+ info->tileops->fb_tilecopy(info, &area);
+}
+
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
+ ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
}
/**
- * xxxfb_copyarea - OBSOLETE function.
+ * xxxfb_copyarea - REQUIRED function. Can use generic routines if
+ * the hardware is non-accelerated and packed-pixel based.
* Copies one area of the screen to another area.
- * Will be deleted in a future version
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
- * This drawing operation copied a rectangular area from one area of the
+ * This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
.fb_setcolreg = xxxfb_setcolreg,
.fb_blank = xxxfb_blank,
.fb_pan_display = xxxfb_pan_display,
- .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
- .fb_copyarea = xxxfb_copyarea, /* Obsolete */
- .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
+ .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
+ .fb_copyarea = xxxfb_copyarea, /* Needed !!! */
+ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+static LIST_HEAD(dev_domain_list);
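
The two spellings are equivalent; LIST_HEAD() is the canonical shorthand:

	/* From include/linux/list.h: */
	#define LIST_HEAD(name) \
		struct list_head name = LIST_HEAD_INIT(name)
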
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
dentry, dentry, from_kuid(&init_user_ns, uid),
any);
ret = NULL;
-
- if (d_inode(dentry))
- ret = v9fs_fid_find_inode(d_inode(dentry), uid);
-
/* we'll recheck under lock if there's anything to look in */
- if (!ret && dentry->d_fsdata) {
+ if (dentry->d_fsdata) {
struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
spin_lock(&dentry->d_lock);
}
}
spin_unlock(&dentry->d_lock);
+ } else {
+ if (dentry->d_inode)
+ ret = v9fs_fid_find_inode(dentry->d_inode, uid);
}
return ret;
config SMBFS_COMMON
tristate
- default y if CIFS=y
- default m if CIFS=m
+ default y if CIFS=y || SMB_SERVER=y
+ default m if CIFS=m || SMB_SERVER=m
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
- if (alignment > ELF_MIN_ALIGN) {
+ if (interpreter || alignment > ELF_MIN_ALIGN) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
};
MODULE_ALIAS_FS("binfmt_misc");
-static struct ctl_table_header *binfmt_misc_header;
-
static int __init init_misc_binfmt(void)
{
int err = register_filesystem(&bm_fs_type);
if (!err)
insert_binfmt(&misc_format);
- binfmt_misc_header = register_sysctl_mount_point("fs/binfmt_misc");
- return 0;
+ return err;
}
static void __exit exit_misc_binfmt(void)
{
- unregister_sysctl_table(binfmt_misc_header);
unregister_binfmt(&misc_format);
unregister_filesystem(&bm_fs_type);
}
{
if (refcount_dec_and_test(&cache->refs)) {
WARN_ON(cache->pinned > 0);
- WARN_ON(cache->reserved > 0);
+ /*
+ * If there was a failure to cleanup a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on reserved > 0 in that
+ * case.
+ */
+ if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
+ WARN_ON(cache->reserved > 0);
/*
* A block_group shouldn't be on the discard_list anymore.
int ret;
bool dirty_bg_running;
+ /*
+ * This can only happen when we are doing read-only scrub on read-only
+ * mount.
+ * In that case we should not start a new transaction on read-only fs.
+ * Thus here we skip all chunk allocations.
+ */
+ if (sb_rdonly(fs_info->sb)) {
+ mutex_lock(&fs_info->ro_block_group_mutex);
+ ret = inc_block_group_ro(cache, 0);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ return ret;
+ }
+
do {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
* important and indicates a real bug if this happens.
*/
if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
space_info->bytes_may_use > 0))
btrfs_dump_space_info(info, space_info, 0, 0);
+
+ /*
+ * If there was a failure to cleanup a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on bytes_reserved > 0 in
+ * that case.
+ */
+ if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
+ if (WARN_ON(space_info->bytes_reserved > 0))
+ btrfs_dump_space_info(info, space_info, 0, 0);
+ }
+
WARN_ON(space_info->reclaim_size > 0);
list_del(&space_info->list);
btrfs_sysfs_remove_space_info(space_info);
BTRFS_FS_STATE_DUMMY_FS_INFO,
BTRFS_FS_STATE_NO_CSUMS,
+
+ /* Indicates there was an error cleaning up a log tree. */
+ BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
};
#define BTRFS_BACKREF_REV_MAX 256
#define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
&(fs_info)->fs_state)))
+#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
+ (unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
+ &(fs_info)->fs_state)))
__printf(5, 6)
__cold
goto fail;
}
- spin_lock(&fs_info->trans_lock);
- list_add(&pending_snapshot->list,
- &trans->transaction->pending_snapshots);
- spin_unlock(&fs_info->trans_lock);
+ trans->pending_snapshot = pending_snapshot;
ret = btrfs_commit_transaction(trans);
if (ret)
if (em->generation < newer_than)
goto next;
+ /* This em is under writeback, no need to defrag */
+ if (em->generation == (u64)-1)
+ goto next;
+
/*
* Our start offset might be in the middle of an existing extent
* map, so take that into account.
ret = 0;
break;
}
+ cond_resched();
}
if (ra_allocated)
struct block_device *bdev = NULL;
fmode_t mode;
int ret;
- bool cancel;
+ bool cancel = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
+ /*
+ * We need to have subvol_sem write locked, to prevent races between
+ * concurrent tasks trying to disable quotas, because we will unlock
+ * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ */
+ lockdep_assert_held_write(&fs_info->subvol_sem);
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+	 * Request the qgroup rescan worker to complete and wait for it. This
+	 * must happen before starting the transaction for quota disable, since
+	 * otherwise we may deadlock with a transaction started by the rescan
+	 * worker.
+ */
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
goto out;
}
if (!fs_info->quota_root)
goto out;
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
ret = -EINVAL;
+ } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ /* Quota disable is in progress */
+ ret = -EBUSY;
}
if (ret) {
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
+ btrfs_err(fs_info,
+ "send: IO error at offset %llu for inode %llu root %llu",
+ page_offset(page), sctx->cur_ino,
+ sctx->send_root->root_key.objectid);
put_page(page);
ret = -EIO;
break;
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
- * We use writeback_inodes_sb here because if we used
+ * We use try_to_writeback_inodes_sb() here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
* We are currently holding the fs freeze lock; if we do an async flush
* we'll do btrfs_join_transaction() and deadlock because we need to
* wait for the fs freeze lock. Using the direct flushing we benefit
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
+ *
+ * Note that try_to_writeback_inodes_sb() will only trigger writeback
+ * if it can read lock sb->s_umount. It will always be able to lock it,
+ * except when the filesystem is being unmounted or being frozen, but in
+ * those cases sync_filesystem() is called, which results in calling
+ * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+ * Note that we don't call writeback_inodes_sb() directly, because it
+ * will emit a warning if sb->s_umount is not locked.
*/
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
+/*
+ * Add a pending snapshot associated with the given transaction handle to the
+ * respective handle. This must be called after the transaction commit started
+ * and while holding fs_info->trans_lock.
+ * This serves to guarantee a caller of btrfs_commit_transaction() that it can
+ * safely free the pending snapshot pointer in case btrfs_commit_transaction()
+ * returns an error.
+ */
+static void add_pending_snapshot(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_transaction *cur_trans = trans->transaction;
+
+ if (!trans->pending_snapshot)
+ return;
+
+ lockdep_assert_held(&trans->fs_info->trans_lock);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+
+ list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
+}
+
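
Seen from the ioctl side (the earlier hunk that sets trans->pending_snapshot),
the protocol is: the creator only attaches the pointer to the handle, and
btrfs_commit_transaction() queues it under trans_lock once the commit has
started. A condensed sketch of the creator's side, error handling elided:

	pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL);
	trans = btrfs_start_transaction(root, 0);

	trans->pending_snapshot = pending_snapshot;	/* no list insertion here */

	ret = btrfs_commit_transaction(trans);
	if (ret)
		kfree(pending_snapshot);	/* safe to free on error, per the
						 * guarantee documented above */
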
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+ add_pending_snapshot(trans);
+
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&fs_info->trans_lock);
+ add_pending_snapshot(trans);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ /* Set by a task that wants to create a snapshot. */
+ struct btrfs_pending_snapshot *pending_snapshot;
refcount_t use_count;
unsigned int type;
/*
struct btrfs_key *key, int slot)
{
struct btrfs_dev_item *ditem;
+ const u32 item_size = btrfs_item_size(leaf, slot);
if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
dev_item_err(leaf, slot,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
+
+ if (unlikely(item_size != sizeof(*ditem))) {
+ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*ditem));
+ return -EUCLEAN;
+ }
+
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
dev_item_err(leaf, slot,
struct btrfs_inode_item *iitem;
u64 super_gen = btrfs_super_generation(fs_info->super_copy);
u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
+ const u32 item_size = btrfs_item_size(leaf, slot);
u32 mode;
int ret;
u32 flags;
if (unlikely(ret < 0))
return ret;
+ if (unlikely(item_size != sizeof(*iitem))) {
+ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*iitem));
+ return -EUCLEAN;
+ }
+
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
if (log->node) {
ret = walk_log_tree(trans, log, &wc);
if (ret) {
+ /*
+			 * We weren't able to traverse the entire log tree. The
+ * typical scenario is getting an -EIO when reading an
+ * extent buffer of the tree, due to a previous writeback
+ * failure of it.
+ */
+ set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+ &log->fs_info->fs_state);
+
+ /*
+ * Some extent buffers of the log tree may still be dirty
+ * and not yet written back to storage, because we may
+ * have updates to a log tree without syncing a log tree,
+ * such as during rename and link operations. So flush
+ * them out and wait for their writeback to complete, so
+ * that we properly cleanup their state and pages.
+ */
+ btrfs_write_marked_extents(log->fs_info,
+ &log->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
+ btrfs_wait_tree_log_extents(log,
+ EXTENT_DIRTY | EXTENT_NEW);
+
if (trans)
btrfs_abort_transaction(trans, ret);
else
return ret;
}
+/*
+ * Query the occupancy of the cache in a region, returning where the next chunk
+ * of data starts and how long it is.
+ */
+static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len)
+{
+ struct cachefiles_object *object;
+ struct file *file;
+ loff_t off, off2;
+
+ *_data_start = -1;
+ *_data_len = 0;
+
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ return -ENOBUFS;
+
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+ granularity = max_t(size_t, object->volume->cache->bsize, granularity);
+
+ _enter("%pD,%li,%llx,%zx/%llx",
+ file, file_inode(file)->i_ino, start, len,
+ i_size_read(file_inode(file)));
+
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, start, SEEK_DATA);
+ if (off == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off < 0 && off >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+ if (round_up(off, granularity) >= start + len)
+ return -ENODATA; /* No data in range */
+
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_HOLE);
+ if (off2 == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+
+ /* Round away partial blocks */
+ off = round_up(off, granularity);
+ off2 = round_down(off2, granularity);
+ if (off2 <= off)
+ return -ENODATA;
+
+ *_data_start = off;
+ if (off2 > start + len)
+ *_data_len = len;
+ else
+ *_data_len = off2 - off;
+ return 0;
+}
+
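
cachefiles_query_occupancy() leans on lseek() semantics: SEEK_DATA finds the
next extent of written data at or after an offset, and SEEK_HOLE finds where
that data ends. The same probing pattern, sketched as plain userspace C on a
filesystem that supports these whence values (illustrative, not cachefiles
code):

	#define _GNU_SOURCE
	#include <unistd.h>

	/* Find the first data extent at or after 'start' in [start, start + len). */
	static int probe_data(int fd, off_t start, off_t len,
			      off_t *data_start, off_t *data_len)
	{
		off_t d, h, end = start + len;

		d = lseek(fd, start, SEEK_DATA);
		if (d < 0 || d >= end)
			return -1;	/* no data in range, or past EOF */

		h = lseek(fd, d, SEEK_HOLE);
		if (h < 0)
			return -1;

		*data_start = d;
		*data_len = (h < end ? h : end) - d;
		return 0;
	}
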
/*
* Handle completion of a write to the cache.
*/
.write = cachefiles_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
+ .query_occupancy = cachefiles_query_occupancy,
};
/*
switch (state) {
case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
- cifs_reconnect(swnreg->tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
- cifs_reconnect(swnreg->tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
goto unlock;
}
- cifs_reconnect(tcon->ses->server, false);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, false);
unlock:
mutex_unlock(&tcon->ses->server->srv_mutex);
mutex_unlock(&server->srv_mutex);
}
-/**
+/*
* Mark all sessions and tcons for reconnect.
*
* @server needs to be previously set to CifsNeedReconnect.
struct TCP_Server_Info *pserver;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct mid_q_entry *mid, *nmid;
- struct list_head retry_list;
-
- server->maxBuf = 0;
- server->max_read = 0;
/*
* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
+}
+
+static void
+cifs_abort_connection(struct TCP_Server_Info *server)
+{
+ struct mid_q_entry *mid, *nmid;
+ struct list_head retry_list;
+
+ server->maxBuf = 0;
+ server->max_read = 0;
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+ cifs_abort_connection(server);
+
do {
try_to_freeze();
mutex_lock(&server->srv_mutex);
cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+ cifs_abort_connection(server);
+
do {
try_to_freeze();
mutex_lock(&server->srv_mutex);
if (server->tcpStatus == CifsNeedReconnect) {
spin_unlock(&cifs_tcp_ses_lock);
+ cifs_reconnect(server, false);
return -ECONNABORTED;
}
spin_unlock(&cifs_tcp_ses_lock);
int i;
for (i = 1; i < chan_count; i++) {
- /*
- * note: for now, we're okay accessing ses->chans
- * without chan_lock. But when chans can go away, we'll
- * need to introduce ref counting to make sure that chan
- * is not freed from under us.
- */
+ spin_unlock(&ses->chan_lock);
cifs_put_tcp_session(ses->chans[i].server, 0);
+ spin_lock(&ses->chan_lock);
ses->chans[i].server = NULL;
}
}
}
}
+ ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
+ if (!ctx->workstation_name) {
+ cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
+ rc = -ENOMEM;
+ kfree(ctx->username);
+ ctx->username = NULL;
+ kfree_sensitive(ctx->password);
+ ctx->password = NULL;
+ kfree(ctx->domainname);
+ ctx->domainname = NULL;
+ goto out_key_put;
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
- } else {
+ } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
+ (strcmp(ses->server->vals->version_string,
+ SMB3ANY_VERSION_STRING) == 0) ||
+ (strcmp(ses->server->vals->version_string,
+ SMBDEFAULT_VERSION_STRING) == 0)) {
cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
rc = -EOPNOTSUPP;
goto out_fail;
+ } else {
+ cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+ "disabled but required for POSIX extensions\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
}
}
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_reconnect(tcon->ses->server, true);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, true);
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
- lru_cache_add(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes)) {
flush_dcache_page(page);
} else
SetPageError(page);
- unlock_page(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
+ unlock_page(page);
+
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
put_page(page);
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_SIZE);
- lru_cache_add(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
continue;
} else {
/* no need to hold page hostage */
- lru_cache_add(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
return readpages_fill_pages(server, rdata, iter, iter->count);
}
-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
- unsigned int rsize, struct list_head *tmplist,
- unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
-{
- struct page *page, *tpage;
- unsigned int expected_index;
- int rc;
- gfp_t gfp = readahead_gfp_mask(mapping);
-
- INIT_LIST_HEAD(tmplist);
-
- page = lru_to_page(page_list);
-
- /*
- * Lock the page and put it in the cache. Since no one else
- * should have access to this page, we're safe to simply set
- * PG_locked without checking it first.
- */
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping,
- page->index, gfp);
-
- /* give up if we can't stick it in the cache */
- if (rc) {
- __ClearPageLocked(page);
- return rc;
- }
-
- /* move first page to the tmplist */
- *offset = (loff_t)page->index << PAGE_SHIFT;
- *bytes = PAGE_SIZE;
- *nr_pages = 1;
- list_move_tail(&page->lru, tmplist);
-
- /* now try and add more pages onto the request */
- expected_index = page->index + 1;
- list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
- /* discontinuity ? */
- if (page->index != expected_index)
- break;
-
- /* would this page push the read over the rsize? */
- if (*bytes + PAGE_SIZE > rsize)
- break;
-
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
- if (rc) {
- __ClearPageLocked(page);
- break;
- }
- list_move_tail(&page->lru, tmplist);
- (*bytes) += PAGE_SIZE;
- expected_index++;
- (*nr_pages)++;
- }
- return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *page_list, unsigned num_pages)
+static void cifs_readahead(struct readahead_control *ractl)
{
int rc;
- int err = 0;
- struct list_head tmplist;
- struct cifsFileInfo *open_file = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifsFileInfo *open_file = ractl->file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
struct TCP_Server_Info *server;
pid_t pid;
- unsigned int xid;
+ unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+ pgoff_t next_cached = ULONG_MAX;
+ bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+ cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+ bool check_cache = caching;
xid = get_xid();
- /*
- * Reads as many pages as possible from fscache. Returns -ENOBUFS
- * immediately if the cookie is negative
- *
- * After this point, every page in the list might have PG_fscache set,
- * so we will need to clean that up off of every page we don't use.
- */
- rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
- &num_pages);
- if (rc == 0) {
- free_xid(xid);
- return rc;
- }
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
- __func__, file, mapping, num_pages);
+ __func__, ractl->file, ractl->mapping, readahead_count(ractl));
/*
- * Start with the page at end of list and move it to private
- * list. Do the same with any following pages until we hit
- * the rsize limit, hit an index discontinuity, or run out of
- * pages. Issue the async read and then start the loop again
- * until the list is empty.
- *
- * Note that list order is important. The page_list is in
- * the order of declining indexes. When we put the pages in
- * the rdata->pages, then we want them in increasing order.
+ * Chop the readahead request up into rsize-sized read requests.
*/
- while (!list_empty(page_list) && !err) {
- unsigned int i, nr_pages, bytes, rsize;
- loff_t offset;
- struct page *page, *tpage;
+ while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+ unsigned int i, got, rsize;
+ struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
+ pgoff_t index = readahead_index(ractl) + last_batch_size;
+
+ /*
+ * Find out if we have anything cached in the range of
+ * interest, and if so, where the next chunk of cached data is.
+ */
+ if (caching) {
+ if (check_cache) {
+ rc = cifs_fscache_query_occupancy(
+ ractl->mapping->host, index, nr_pages,
+ &next_cached, &cache_nr_pages);
+ if (rc < 0)
+ caching = false;
+ check_cache = false;
+ }
+
+ if (index == next_cached) {
+ /*
+ * TODO: Send a whole batch of pages to be read
+ * by the cache.
+ */
+ page = readahead_page(ractl);
+ last_batch_size = 1 << thp_order(page);
+ if (cifs_readpage_from_fscache(ractl->mapping->host,
+ page) < 0) {
+ /*
+ * TODO: Deal with cache read failure
+ * here, but for the moment, delegate
+ * that to readpage.
+ */
+ caching = false;
+ }
+ unlock_page(page);
+ next_cached++;
+ cache_nr_pages--;
+ if (cache_nr_pages == 0)
+ check_cache = true;
+ continue;
+ }
+ }
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
+ if (rc) {
+ if (rc == -EAGAIN)
+ continue;
break;
+ }
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
+ nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+ nr_pages = min_t(size_t, nr_pages, next_cached - index);
/*
 * Give up immediately if rsize is too small to read an entire
 * page. The VFS will fall back to readpage. We should never
 * reach this point however since we set ra_pages to 0 when the
 * rsize is smaller than a cache page.
 */
- if (unlikely(rsize < PAGE_SIZE)) {
- add_credits_and_wake_if(server, credits, 0);
- free_xid(xid);
- return 0;
- }
-
- nr_pages = 0;
- err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
- &nr_pages, &offset, &bytes);
- if (!nr_pages) {
+ if (unlikely(!nr_pages)) {
add_credits_and_wake_if(server, credits, 0);
break;
}
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- }
- rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->server = server;
- rdata->mapping = mapping;
- rdata->offset = offset;
- rdata->bytes = bytes;
- rdata->pid = pid;
- rdata->pagesz = PAGE_SIZE;
- rdata->tailsz = PAGE_SIZE;
+ got = __readahead_batch(ractl, rdata->pages, nr_pages);
+ if (got != nr_pages) {
+ pr_warn("__readahead_batch() returned %u/%u\n",
+ got, nr_pages);
+ nr_pages = got;
+ }
+
+ rdata->nr_pages = nr_pages;
+ rdata->bytes = readahead_batch_length(ractl);
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->server = server;
+ rdata->mapping = ractl->mapping;
+ rdata->offset = readahead_pos(ractl);
+ rdata->pid = pid;
+ rdata->pagesz = PAGE_SIZE;
+ rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
- rdata->credits = credits_on_stack;
-
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- rdata->pages[rdata->nr_pages++] = page;
- }
+ rdata->credits = credits_on_stack;
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
- lru_cache_add(page);
unlock_page(page);
put_page(page);
}
}
kref_put(&rdata->refcount, cifs_readdata_release);
+ last_batch_size = nr_pages;
}
free_xid(xid);
- return rc;
}
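
For readers new to the readahead_control API that this hunk adopts, here is a minimal hypothetical sketch (not part of the patch) of how a ->readahead implementation drains the request in batches; the actual I/O submission is elided:

#include <linux/pagemap.h>

#define DEMO_BATCH 16	/* illustrative batch size, not from the patch */

static void demo_readahead(struct readahead_control *ractl)
{
	struct page *pages[DEMO_BATCH];
	unsigned int got, i;

	/* Pages arrive locked, each with a reference held. */
	while ((got = __readahead_batch(ractl, pages, DEMO_BATCH))) {
		/*
		 * A real implementation would issue async I/O here and let
		 * the completion handler unlock each page; this sketch
		 * simply declines the pages, which is also legal.
		 */
		for (i = 0; i < got; i++) {
			unlock_page(pages[i]);
			put_page(pages[i]);
		}
	}
}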
/*
* In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
* so this method should never be called.
*
- * Direct IO is not yet supported in the cached mode.
+ * Direct IO is not yet supported in the cached mode.
*/
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
- .readpages = cifs_readpages,
+ .readahead = cifs_readahead,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
}
}
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ if (ops)
+ ops->end_operation(cres);
+}
+
/*
- * Retrieve a page from FS-Cache
+ * Fallback page reading interface.
*/
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
- cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
- __func__, CIFS_I(inode)->fscache, page, inode);
- return -ENOBUFS; // Needs conversion to using netfslib
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ int ret;
+
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+ NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
/*
- * Retrieve a set of pages from FS-Cache
+ * Fallback page writing interface.
*/
-int __cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ bool no_space_allocated_yet)
{
- cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
- __func__, CIFS_I(inode)->fscache, *nr_pages, inode);
- return -ENOBUFS; // Needs conversion to using netfslib
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ loff_t start = page_offset(page);
+ size_t len = PAGE_SIZE;
+ int ret;
+
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_write_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+ no_space_allocated_yet);
+ if (ret == 0)
+ ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+/*
+ * Retrieve a page from FS-Cache
+ */
+int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
+ int ret;
- WARN_ON(!cifsi->fscache);
+ cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
+ __func__, cifs_inode_cookie(inode), page, inode);
+ ret = fscache_fallback_read_page(inode, page);
+ if (ret < 0)
+ return ret;
+
+ /* Read completed synchronously */
+ SetPageUptodate(page);
+ return 0;
+}
+
+void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+{
cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
- __func__, cifsi->fscache, page, inode);
+ __func__, cifs_inode_cookie(inode), page, inode);
+
+ fscache_fallback_write_page(inode, page, true);
+}
+
+/*
+ * Query the cache occupancy.
+ */
+int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
+{
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ loff_t start, data_start;
+ size_t len, data_len;
+ int ret;
- // Needs conversion to using netfslib
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ start = first * PAGE_SIZE;
+ len = nr_pages * PAGE_SIZE;
+ ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
+ &data_start, &data_len);
+ if (ret == 0) {
+ *_data_first = data_start / PAGE_SIZE;
+ *_data_nr_pages = data_len / PAGE_SIZE;
+ }
+
+ fscache_end_operation(&cres);
+ return ret;
}
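
A quick worked example of the unit conversion above, assuming PAGE_SIZE is 4096 (numbers invented for illustration):

/*
 * first = 100, nr_pages = 16    ->  start = 409600, len = 65536
 * cache replies: data_start = 417792, data_len = 8192
 *   *_data_first    = 417792 / 4096 = 102
 *   *_data_nr_pages =   8192 / 4096 = 2
 * i.e. "the next cached chunk covers pages 102-103".
 */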
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
+#include <linux/swap.h>
#include <linux/fscache.h>
#include "cifsglob.h"
}
-extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
-extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
-extern int __cifs_readpages_from_fscache(struct inode *,
- struct address_space *,
- struct list_head *,
- unsigned *);
-extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
-
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
{
return CIFS_I(inode)->fscache;
i_size_read(inode), flags);
}
-static inline int cifs_readpage_from_fscache(struct inode *inode,
- struct page *page)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpage_from_fscache(inode, page);
+extern int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages);
- return -ENOBUFS;
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
+{
+ if (!cifs_inode_cookie(inode))
+ return -ENOBUFS;
+ return __cifs_fscache_query_occupancy(inode, first, nr_pages,
+ _data_first, _data_nr_pages);
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
+extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
+
+static inline int cifs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpages_from_fscache(inode, mapping, pages,
- nr_pages);
+ if (cifs_inode_cookie(inode))
+ return __cifs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page)
{
- if (PageFsCache(page))
+ if (cifs_inode_cookie(inode))
__cifs_readpage_to_fscache(inode, page);
}
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ if (PageFsCache(page)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ wait_on_page_fscache(page);
+ fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+ }
+ return true;
+}
+
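A brief note on the contract this helper implements (standard ->releasepage semantics, not something introduced by this patch):

/*
 * Returning true lets the VM go ahead and free the page; returning
 * false defers the release. kswapd and allocations lacking __GFP_FS
 * must not sleep on cache I/O, hence the early "false" above;
 * otherwise we wait for the in-flight cache write (PG_fscache) to end.
 */
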
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
{
+ *_data_first = ULONG_MAX;
+ *_data_nr_pages = 0;
return -ENOBUFS;
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static inline int
+cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
return -ENOBUFS;
}
-static inline void cifs_readpage_to_fscache(struct inode *inode,
- struct page *page) {}
+static inline
+void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
+
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ return true; /* May release page */
+}
#endif /* CONFIG_CIFS_FSCACHE */
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
+ struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
__func__, cifs_i->uniqueid);
set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
+ /* Invalidate fscache cookie */
+ cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+ fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
}
/*
int
cifs_invalidate_mapping(struct inode *inode)
{
- struct cifs_fscache_inode_coherency_data cd;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
int rc = 0;
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
__func__, inode);
}
- cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
- fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
return rc;
}
else
sz += sizeof(__le16);
- sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+ if (ses->workstation_name)
+ sz += sizeof(__le16) * strnlen(ses->workstation_name,
+ CIFS_MAX_WORKSTATION_LEN);
+ else
+ sz += sizeof(__le16);
return sz;
}
spin_unlock(&GlobalMid_Lock);
if (reconnect) {
- spin_lock(&cifs_tcp_ses_lock);
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
}
return mid;
* be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB
*/
- spin_lock(&cifs_tcp_ses_lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&cifs_tcp_ses_lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
trace_smb3_partial_send_reconnect(server->CurrentMid,
server->conn_id, server->hostname);
}
return ret;
iomap->offset = map.m_la;
- if (flags & IOMAP_DAX) {
+ if (flags & IOMAP_DAX)
iomap->dax_dev = mdev.m_daxdev;
- iomap->offset += mdev.m_dax_part_off;
- } else {
+ else
iomap->bdev = mdev.m_bdev;
- }
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
+ if (flags & IOMAP_DAX)
+ iomap->addr += mdev.m_dax_part_off;
}
return 0;
}
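
The move is easier to follow once the two fields' roles are kept apart: iomap->offset is a logical file position, while the partition offset is a property of the physical device address, so it belongs in iomap->addr. Illustrative numbers:

/*
 * File logical offset 0 maps to device physical 1 MiB, on a partition
 * that starts 16 MiB into the DAX device:
 *   iomap->offset = 0               (logical; never includes partoff)
 *   iomap->addr   = 1 MiB + 16 MiB  (physical; DAX case only)
 */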
return false;
}
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
- bool sync, int bios)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
- /* wake up the caller thread for sync decompression */
- if (sync) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
-
- if (atomic_add_return(bios, &io->pending_bios))
- return;
- /* Use workqueue and sync decompression for atomic contexts only */
- if (in_atomic() || irqs_disabled()) {
- queue_work(z_erofs_workqueue, &io->u.work);
- /* enable sync decompression for readahead */
- if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
- sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
- return;
- }
- z_erofs_decompressqueue_work(&io->u.work);
-}
-
static bool z_erofs_page_is_invalidated(struct page *page)
{
return !page->mapping && !z_erofs_is_shortlived_page(page);
}
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
- struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
-
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- if (err)
- SetPageError(page);
-
- if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
- if (!err)
- SetPageUptodate(page);
- unlock_page(page);
- }
- }
- z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
- bio_put(bio);
-}
-
static int z_erofs_decompress_pcluster(struct super_block *sb,
struct z_erofs_pcluster *pcl,
struct page **pagepool)
kvfree(bgq);
}
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ bool sync, int bios)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+ /* wake up the caller thread for sync decompression */
+ if (sync) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&io->u.wait.lock, flags);
+ if (!atomic_add_return(bios, &io->pending_bios))
+ wake_up_locked(&io->u.wait);
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ return;
+ }
+
+ if (atomic_add_return(bios, &io->pending_bios))
+ return;
+ /* Use workqueue and sync decompression for atomic contexts only */
+ if (in_atomic() || irqs_disabled()) {
+ queue_work(z_erofs_workqueue, &io->u.work);
+ /* enable sync decompression for readahead */
+ if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+ return;
+ }
+ z_erofs_decompressqueue_work(&io->u.work);
+}
+
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr,
struct page **pagepool,
qtail[JQ_BYPASS] = &pcl->next;
}
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+ struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+ blk_status_t err = bio->bi_status;
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+ if (err)
+ SetPageError(page);
+
+ if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+ if (!err)
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ }
+ z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+ bio_put(bio);
+}
+
static void z_erofs_submit_queue(struct super_block *sb,
struct z_erofs_decompress_frontend *f,
struct page **pagepool,
if (endoff >= m.clusterofs) {
m.headtype = m.type;
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ /*
+ * For ztailpacking files, in order to inline data more
+ * effectively, special EOF lclusters are now supported
+ * which can have three parts at most.
+ */
+ if (ztailpacking && end > inode->i_size)
+ end = inode->i_size;
break;
}
/* m.lcn should be >= 1 if endoff < m.clusterofs */
/*
* Inode operation get_posix_acl().
*
- * inode->i_mutex: don't care
+ * inode->i_rwsem: don't care
*/
struct posix_acl *
ext4_get_acl(struct inode *inode, int type, bool rcu)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_mutex: down unless called from ext4_new_inode
+ * inode->i_rwsem: down unless called from ext4_new_inode
*/
static int
__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
+ * dir->i_rwsem: down
+ * inode->i_rwsem: up (access to inode is still exclusive)
*/
int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_mutex even when reading would cause contention
+ * data. Taking i_rwsem even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
spinlock_t s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
+ tid_t s_fc_ineligible_tid;
#ifdef CONFIG_EXT4_DEBUG
int s_fc_debug_max_replay;
#endif
enum {
EXT4_MF_MNTDIR_SAMPLED,
EXT4_MF_FS_ABORTED, /* Fatal error detected */
- EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
- EXT4_MF_FC_COMMITTING /* File system underoing a fast
- * commit.
- */
+ EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
struct dentry *dentry);
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
void ext4_fc_start_update(struct inode *inode);
void ext4_fc_stop_update(struct inode *inode);
void ext4_fc_del(struct inode *inode);
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
int __init ext4_fc_init_dentry_cache(void);
void ext4_fc_destroy_dentry_cache(void);
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ int len, int replay);
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
-/* Update i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_disksize. Requires i_rwsem to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
up_write(&EXT4_I(inode)->i_data_sem);
}
-/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_size, i_disksize. Requires i_rwsem to avoid races with truncate */
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
{
int changed = 0;
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
- * i_mutex for direct I/O reads. This only works for extent-based
+ * i_rwsem for direct I/O reads. This only works for extent-based
* files, and it doesn't work if data journaling is enabled, since the
* dioread_nolock code uses b_private to pass information back to the
* I/O completion handler, and this conflicts with the jbd's use of
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/* Preallocate the range including the unaligned edges */
goto out;
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode, 0);
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
* stuff such as page-cache locking consistency, bh mapping consistency or
* extent's data copying must be performed by caller.
* Locking:
- * i_mutex is held for both inodes
+ * i_rwsem is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from requested range are locked for both inodes
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ 0, path[j].p_block, 1, 1);
}
ext4_ext_drop_refs(path);
kfree(path);
}
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ map.m_lblk, map.m_pblk, map.m_len, 1);
}
cur = cur + map.m_len;
}
}
/*
- * Mark file system as fast commit ineligible. This means that next commit
- * operation would result in a full jbd2 commit.
+ * Mark the file system as fast-commit ineligible and record the
+ * latest ineligible transaction tid. Until that transaction commits,
+ * every commit operation results in a full jbd2 commit.
*/
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ tid_t tid;
if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
return;
ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (handle && !IS_ERR(handle))
+ tid = handle->h_transaction->t_tid;
+ else {
+ read_lock(&sbi->s_journal->j_state_lock);
+ tid = sbi->s_journal->j_running_transaction ?
+ sbi->s_journal->j_running_transaction->t_tid : 0;
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ spin_lock(&sbi->s_fc_lock);
+ if (sbi->s_fc_ineligible_tid < tid)
+ sbi->s_fc_ineligible_tid = tid;
+ spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
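
To make the tid-based scheme concrete, an illustrative timeline (transaction ids invented):

/*
 * t100 running : mark_ineligible()          -> s_fc_ineligible_tid = 100
 * t100 commits : forced to be a full jbd2 commit
 * cleanup(100) : 100 >= s_fc_ineligible_tid -> tid reset, flag cleared
 * t101 onward  : fast commits are permitted again
 */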
spin_lock(&sbi->s_fc_lock);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
- (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING)) ?
+ (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
spin_unlock(&sbi->s_fc_lock);
mutex_unlock(&ei->i_fc_lock);
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
if (!node) {
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
if (!node->fcd_name.name) {
kmem_cache_free(ext4_fc_dentry_cachep, node);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_NOMEM);
+ EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
node->fcd_name.len = dentry->d_name.len;
spin_lock(&sbi->s_fc_lock);
- if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING))
+ if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
&sbi->s_fc_dentry_q[FC_Q_STAGING]);
else
if (ext4_should_journal_data(inode)) {
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_INODE_JOURNAL_DATA);
+ EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
return;
}
int ret = 0;
spin_lock(&sbi->s_fc_lock);
- ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
while (atomic_read(&ei->i_fc_updates)) {
* Fast commit cleanup routine. This is called after every fast commit and
* full commit. full is true if we are called after a full commit.
*/
-static void ext4_fc_cleanup(journal_t *journal, int full)
+static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- ext4_fc_reset_inode(&iter->vfs_inode);
+ if (iter->i_sync_tid <= tid)
+ ext4_fc_reset_inode(&iter->vfs_inode);
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
#if (BITS_PER_LONG < 64)
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
&sbi->s_fc_q[FC_Q_MAIN]);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (tid >= sbi->s_fc_ineligible_tid) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
if (full)
sbi->s_fc_bytes = 0;
if (state->fc_modified_inodes[i] == ino)
return 0;
if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
- state->fc_modified_inodes_size +=
- EXT4_FC_REPLAY_REALLOC_INCREMENT;
state->fc_modified_inodes = krealloc(
- state->fc_modified_inodes, sizeof(int) *
- state->fc_modified_inodes_size,
- GFP_KERNEL);
+ state->fc_modified_inodes,
+ sizeof(int) * (state->fc_modified_inodes_size +
+ EXT4_FC_REPLAY_REALLOC_INCREMENT),
+ GFP_KERNEL);
if (!state->fc_modified_inodes)
return -ENOMEM;
+ state->fc_modified_inodes_size +=
+ EXT4_FC_REPLAY_REALLOC_INCREMENT;
}
state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
return 0;
}
inode = NULL;
- ext4_fc_record_modified_inode(sb, ino);
+ ret = ext4_fc_record_modified_inode(sb, ino);
+ if (ret)
+ goto out;
raw_fc_inode = (struct ext4_inode *)
(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
}
/*
- * Record physical disk regions which are in use as per fast commit area. Our
- * simple replay phase allocator excludes these regions from allocation.
+ * Record physical disk regions which are in use as per fast commit area,
+ * and used by inodes during replay phase. Our simple replay phase
+ * allocator excludes these regions from allocation.
*/
-static int ext4_fc_record_regions(struct super_block *sb, int ino,
- ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
{
struct ext4_fc_replay_state *state;
struct ext4_fc_alloc_region *region;
state = &EXT4_SB(sb)->s_fc_replay_state;
+ /*
+ * During the replay phase, fc_regions_valid may not be the same as
+ * fc_regions_used; bring it up to date when recording new regions.
+ */
+ if (replay && state->fc_regions_used != state->fc_regions_valid)
+ state->fc_regions_used = state->fc_regions_valid;
if (state->fc_regions_used == state->fc_regions_size) {
state->fc_regions_size +=
EXT4_FC_REPLAY_REALLOC_INCREMENT;
region->pblk = pblk;
region->len = len;
+ if (replay)
+ state->fc_regions_valid++;
+
return 0;
}
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
start = le32_to_cpu(ex->ee_block);
start_pblk = ext4_ext_pblock(ex);
map.m_pblk = 0;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret == 0) {
/* Range is not mapped */
path = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path)) {
- iput(inode);
- return 0;
- }
+ if (IS_ERR(path))
+ goto out;
memset(&newex, 0, sizeof(newex));
newex.ee_block = cpu_to_le32(cur);
ext4_ext_store_pblock(
up_write((&EXT4_I(inode)->i_data_sem));
ext4_ext_drop_refs(path);
kfree(path);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
goto next;
}
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex),
start_pblk + cur - start);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* Mark the old blocks as free since they aren't used
* anymore. We maintain an array of all the modified
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex), map.m_pblk);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* We may have split the extent tree while toggling the state.
* Try to shrink the extent tree now.
}
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
+out:
iput(inode);
return 0;
}
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
inode->i_ino, le32_to_cpu(lrange.fc_lblk),
map.m_len = remaining;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret > 0) {
remaining -= ret;
cur += ret;
}
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_remove_space(inode, lrange.fc_lblk,
- lrange.fc_lblk + lrange.fc_len - 1);
+ ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_lblk) +
+ le32_to_cpu(lrange.fc_len) - 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
ext4_ext_replay_shrink_inode(inode,
i_size_read(inode) >> sb->s_blocksize_bits);
ext4_mark_inode_dirty(NULL, inode);
+out:
iput(inode);
-
return 0;
}
ret = ext4_fc_record_regions(sb,
le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
- ext4_ext_get_actual_len(ex));
+ ext4_ext_get_actual_len(ex), 0);
if (ret < 0)
break;
ret = JBD2_FC_REPLAY_CONTINUE;
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
struct page **pagep,
void **fsdata)
{
- int ret, inline_size;
+ int ret;
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
goto out;
}
- inline_size = ext4_get_max_inline_size(inode);
-
- ret = -ENOSPC;
- if (inline_size >= pos + len) {
- ret = ext4_prepare_inline_data(handle, inode, pos + len);
- if (ret && ret != -ENOSPC)
- goto out_journal;
- }
+ ret = ext4_prepare_inline_data(handle, inode, pos + len);
+ if (ret && ret != -ENOSPC)
+ goto out_journal;
/*
* We cannot recurse into the filesystem as the transaction
struct ext4_iloc *iloc,
void *buf, int inline_size)
{
- ext4_create_inline_data(handle, inode, inline_size);
+ int ret;
+
+ ret = ext4_create_inline_data(handle, inode, inline_size);
+ if (ret) {
+ ext4_msg(inode->i_sb, KERN_EMERG,
+ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
+ inode->i_ino, ret);
+ return;
+ }
ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
return;
no_delete:
if (!list_empty(&EXT4_I(inode)->i_fc_list))
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold i_rwsem.
*
* Add inode to orphan list in case we crash before
* truncate finishes
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/*
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
- * have i_mutex locked because it's not necessary.
+ * have i_rwsem locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
WARN_ON(!inode_is_locked(inode));
* transaction are already on disk (truncate waits for pages under
* writeback).
*
- * Called with inode->i_mutex down.
+ * Called with inode->i_rwsem down.
*/
int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
return PTR_ERR(handle);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
err = -EINVAL;
goto err_out;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
err = ext4_resize_fs(sb, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
struct super_block *sb = ar->inode->i_sb;
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i = sb->s_blocksize;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
ext4_fsblk_t goal, block;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
ext4_get_group_no_and_offset(sb,
max(ext4_group_first_block_no(sb, group), goal),
NULL, &blkoff);
- i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
+ while (1) {
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
blkoff);
+ if (i >= max)
+ break;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) + i)) {
+ blkoff = i + 1;
+ } else
+ break;
+ }
brelse(bitmap_bh);
- if (i >= sb->s_blocksize)
- continue;
- if (ext4_fc_replay_check_excluded(sb,
- ext4_group_first_block_no(sb, group) + i))
- continue;
- break;
+ if (i < max)
+ break;
}
- if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
+ if (group >= ext4_get_groups_count(sb) || i >= max) {
+ *errp = -ENOSPC;
return 0;
+ }
block = ext4_group_first_block_no(sb, group) + i;
ext4_mb_mark_bb(sb, block, 1, 1);
* when we add extents we extent the journal
*/
/*
- * Even though we take i_mutex we can still cause block
+ * Even though we take i_rwsem we can still cause block
* allocation via mmap write to holes. If we have allocated
* new blocks we fail migrate. New block allocation will
* clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
* dirents in directories.
*/
ext4_fc_mark_ineligible(old.inode->i_sb,
- EXT4_FC_REASON_RENAME_DIR);
+ EXT4_FC_REASON_RENAME_DIR, handle);
} else {
if (new.inode)
ext4_fc_track_unlink(handle, new.dentry);
if (unlikely(retval))
goto end_rename;
ext4_fc_mark_ineligible(new.inode->i_sb,
- EXT4_FC_REASON_CROSS_RENAME);
+ EXT4_FC_REASON_CROSS_RENAME, handle);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext4_orphan_cleanup().
*
- * Orphan list manipulation functions must be called under i_mutex unless
+ * Orphan list manipulation functions must be called under i_rwsem unless
* we are just creating the inode or deleting it.
*/
int ext4_orphan_add(handle_t *handle, struct inode *inode)
/*
* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. Note that we either
- * hold i_mutex, or the inode can not be referenced from outside,
+ * hold i_rwsem, or the inode can not be referenced from outside,
* so i_nlink should not be bumped due to race
*/
ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
sbi->s_fc_bytes = 0;
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
+ sbi->s_fc_ineligible_tid = 0;
spin_lock_init(&sbi->s_fc_lock);
memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
sbi->s_fc_replay_state.fc_regions = NULL;
sbi = ext4_alloc_sbi(sb);
if (!sbi)
- ret = -ENOMEM;
+ return -ENOMEM;
fc->s_fs_info = sbi;
if (IS_SYNC(inode))
ext4_handle_sync(handle);
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
cleanup:
brelse(is.iloc.bh);
if (error == 0)
error = error2;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL);
return error;
}
error);
goto cleanup;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
}
error = 0;
cleanup:
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
static int __init init_fs_stat_sysctls(void)
{
register_sysctl_init("fs", fs_stat_sysctls);
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
return 0;
}
fs_initcall(init_fs_stat_sysctls);
kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_rs_delete(ip, &inode->i_writecount);
- if (file->f_mode & FMODE_WRITE)
+ if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
+ gfs2_rs_delete(ip, &inode->i_writecount);
gfs2_qa_put(ip);
+ }
return 0;
}
void gfs2_glock_put(struct gfs2_glock *gl)
{
- /* last put could call sleepable dlm api */
- might_sleep();
-
if (lockref_put_or_lock(&gl->gl_lockref))
return;
min_ret = iov_iter_count(&msg.msg_iter);
ret = sock_recvmsg(sock, &msg, flags);
-out_free:
if (ret < min_ret) {
if (ret == -EAGAIN && force_nonblock)
return -EAGAIN;
ret = -EINTR;
req_set_fail(req);
} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+out_free:
req_set_fail(req);
}
-
__io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
static void *io_mem_alloc(size_t size)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
- __GFP_NORETRY | __GFP_ACCOUNT;
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
- return (void *) __get_free_pages(gfp_flags, get_order(size));
+ return (void *) __get_free_pages(gfp, get_order(size));
}
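
As an aside (an observation about the flag masks, not a claim from the patch itself): GFP_KERNEL_ACCOUNT is defined as GFP_KERNEL | __GFP_ACCOUNT, so the only behavioural change in the new mask is that __GFP_NORETRY is dropped:

/*
 * old: GFP_KERNEL | __GFP_ACCOUNT | __GFP_ZERO | __GFP_NOWARN |
 *      __GFP_COMP | __GFP_NORETRY
 * new: GFP_KERNEL | __GFP_ACCOUNT | __GFP_ZERO | __GFP_NOWARN |
 *      __GFP_COMP
 */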
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
#include "../internal.h"
+#define IOEND_BATCH_SIZE 4096
+
/*
* Structure allocated for each folio when block size < folio size
* to track sub-folio uptodate status and I/O completions.
* state, release holds on bios, and finally free up memory. Do not use the
* ioend after this.
*/
-static void
+static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
struct inode *inode = ioend->io_inode;
u64 start = bio->bi_iter.bi_sector;
loff_t offset = ioend->io_offset;
bool quiet = bio_flagged(bio, BIO_QUIET);
+ u32 folio_count = 0;
for (bio = &ioend->io_inline_bio; bio; bio = next) {
struct folio_iter fi;
next = bio->bi_private;
/* walk all folios in bio, ending page IO on them */
- bio_for_each_folio_all(fi, bio)
+ bio_for_each_folio_all(fi, bio) {
iomap_finish_folio_write(inode, fi.folio, fi.length,
error);
+ folio_count++;
+ }
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
"%s: writeback error on inode %lu, offset %lld, sector %llu",
inode->i_sb->s_id, inode->i_ino, offset, start);
}
+ return folio_count;
}
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbounded length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
+ completions = iomap_finish_ioend(ioend, error);
while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
+ completions += iomap_finish_ioend(ioend, error);
}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
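
Rough arithmetic behind the threshold (the per-folio cost here is an assumption for illustration, not a number the patch states):

/*
 * IOEND_BATCH_SIZE * 8 = 4096 * 8 = 32768 folio completions between
 * cond_resched() calls. If ending writeback on one folio costs a few
 * hundred nanoseconds, a batch runs for roughly 10ms, matching the
 * stated scheduler-holdoff goal.
 */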
return false;
if (ioend->io_offset + ioend->io_size != next->io_offset)
return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
+ return false;
return true;
}
ioend->io_flags = wpc->iomap.flags;
ioend->io_inode = inode;
ioend->io_size = 0;
+ ioend->io_folios = 0;
ioend->io_offset = offset;
ioend->io_bio = bio;
+ ioend->io_sector = sector;
return ioend;
}
return false;
if (sector != bio_end_sector(wpc->ioend->io_bio))
return false;
+ /*
+ * Limit ioend bio chain lengths to minimise IO completion latency. This
+ * also prevents long tight loops ending page writeback on all the
+ * folios in the ioend.
+ */
+ if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
+ return false;
return true;
}
&submit_list);
count++;
}
+ if (count)
+ wpc->ioend->io_folios++;
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
WARN_ON_ONCE(!folio_test_locked(folio));
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
- spin_lock(&commit_transaction->t_handle_lock);
- while (atomic_read(&commit_transaction->t_updates)) {
- DEFINE_WAIT(wait);
+ /* Wait for any outstanding t_updates to finish. */
+ jbd2_journal_wait_updates(journal);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&commit_transaction->t_updates)) {
- spin_unlock(&commit_transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- write_lock(&journal->j_state_lock);
- spin_lock(&commit_transaction->t_handle_lock);
- }
- finish_wait(&journal->j_wait_updates, &wait);
- }
- spin_unlock(&commit_transaction->t_handle_lock);
commit_transaction->t_state = T_SWITCH;
write_unlock(&journal->j_state_lock);
commit_transaction->t_state = T_COMMIT_DFLUSH;
write_unlock(&journal->j_state_lock);
- /*
+ /*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
* the commit record
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 1);
+ journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
{
jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 0);
+ journal->j_fc_cleanup_callback(journal, 0, tid);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
/**
* jbd2_journal_shrink_scan()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Scan the checkpointed buffer on the checkpoint list and release the
* journal_head.
/**
* jbd2_journal_shrink_count()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Count the number of checkpoint buffers on the checkpoint list.
*/
}
/* OK, account for the buffers that this operation expects to
- * use and add the handle to the running transaction.
+ * use and add the handle to the running transaction.
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
}
EXPORT_SYMBOL(jbd2_journal_restart);
+/*
+ * Waits for any outstanding t_updates to finish.
+ * This is called with j_state_lock held for write.
+ */
+void jbd2_journal_wait_updates(journal_t *journal)
+{
+ transaction_t *commit_transaction = journal->j_running_transaction;
+
+ if (!commit_transaction)
+ return;
+
+ spin_lock(&commit_transaction->t_handle_lock);
+ while (atomic_read(&commit_transaction->t_updates)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_wait_updates, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&commit_transaction->t_updates)) {
+ spin_unlock(&commit_transaction->t_handle_lock);
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ write_lock(&journal->j_state_lock);
+ spin_lock(&commit_transaction->t_handle_lock);
+ }
+ finish_wait(&journal->j_wait_updates, &wait);
+ }
+ spin_unlock(&commit_transaction->t_handle_lock);
+}
+
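A hypothetical caller, to show the locking contract (the helper may drop and re-take j_state_lock internally while it sleeps):

static void demo_quiesce(journal_t *journal)
{
	write_lock(&journal->j_state_lock);
	jbd2_journal_wait_updates(journal);	/* may sleep */
	write_unlock(&journal->j_state_lock);
}
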
/**
* jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
write_lock(&journal->j_state_lock);
}
- /* Wait until there are no running updates */
- while (1) {
- transaction_t *transaction = journal->j_running_transaction;
-
- if (!transaction)
- break;
+ /* Wait until there are no running t_updates */
+ jbd2_journal_wait_updates(journal);
- spin_lock(&transaction->t_handle_lock);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&transaction->t_updates)) {
- spin_unlock(&transaction->t_handle_lock);
- finish_wait(&journal->j_wait_updates, &wait);
- break;
- }
- spin_unlock(&transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- finish_wait(&journal->j_wait_updates, &wait);
- write_lock(&journal->j_state_lock);
- }
write_unlock(&journal->j_state_lock);
/*
#include "mgmt/user_config.h"
#include "crypto_ctx.h"
#include "transport_ipc.h"
+#include "../smbfs_common/arc4.h"
/*
* Fixed format data defining GSS header and fixed string
nt_len - CIFS_ENCPWD_SIZE,
domain_name, conn->ntlmssp.cryptkey);
kfree(domain_name);
+
+ /* The recovered secondary session key */
+ if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
+ struct arc4_ctx *ctx_arc4;
+ unsigned int sess_key_off, sess_key_len;
+
+ sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
+ sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
+
+ if (blob_len < (u64)sess_key_off + sess_key_len)
+ return -EINVAL;
+
+ ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+ if (!ctx_arc4)
+ return -ENOMEM;
+
+ cifs_arc4_setkey(ctx_arc4, sess->sess_key,
+ SMB2_NTLMV2_SESSKEY_SIZE);
+ cifs_arc4_crypt(ctx_arc4, sess->sess_key,
+ (char *)authblob + sess_key_off, sess_key_len);
+ kfree_sensitive(ctx_arc4);
+ }
+
return ret;
}
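
Since RC4 is a symmetric stream cipher, recovering the exchanged session key is just one more pass of the same keystream. A self-contained sketch of that step (everything except the cifs_arc4_* helpers is hypothetical):

#include <linux/slab.h>
#include "../smbfs_common/arc4.h"

/* key: in = NTLMv2 session base key, out = recovered session key. */
static int demo_recover_session_key(u8 *key, unsigned int key_len,
				    const u8 *exchanged, unsigned int len)
{
	struct arc4_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	cifs_arc4_setkey(ctx, key, key_len);
	cifs_arc4_crypt(ctx, key, exchanged, len);	/* in-place result */
	kfree_sensitive(ctx);
	return 0;
}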
(cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+ if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+
chgblob->NegotiateFlags = cpu_to_le32(flags);
len = strlen(ksmbd_netbios_name());
name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
(struct create_posix *)context;
if (le16_to_cpu(context->DataOffset) +
le32_to_cpu(context->DataLength) <
- sizeof(struct create_posix)) {
+ sizeof(struct create_posix) - 4) {
rc = -EINVAL;
goto err_out1;
}
goto free_conv_name;
}
- struct_sz = readdir_info_level_struct_sz(info_level);
- next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
- KSMBD_DIR_INFO_ALIGNMENT);
+ struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
+ next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+ d_info->last_entry_off_align = next_entry_offset - struct_sz;
if (next_entry_offset > d_info->out_buf_len) {
d_info->out_buf_len = 0;
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
->NextEntryOffset = 0;
+ d_info.data_count -= d_info.last_entry_off_align;
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
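
A worked example of the padding accounting introduced above (sizes invented for illustration):

/*
 * struct_sz            = info_level struct size - 1 + conv_len = 107
 * next_entry_offset    = ALIGN(107, 8)                         = 112
 * last_entry_off_align = 112 - 107                             = 5
 * The 5 pad bytes exist only to align a *following* entry, so for the
 * final entry in the buffer they are subtracted from data_count and
 * never counted in the response length.
 */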
__le16 ChannelInfoOffset,
__le16 ChannelInfoLength)
{
+ unsigned int i, ch_count;
+
if (work->conn->dialect == SMB30_PROT_ID &&
Channel != SMB2_CHANNEL_RDMA_V1)
return -EINVAL;
- if (ChannelInfoOffset == 0 ||
- le16_to_cpu(ChannelInfoLength) < sizeof(*desc))
+ ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
+ if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
+ for (i = 0; i < ch_count; i++) {
+ pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
+ i,
+ le32_to_cpu(desc[i].token),
+ le32_to_cpu(desc[i].length));
+ }
+ }
+ if (ch_count != 1) {
+ ksmbd_debug(RDMA, "RDMA multiple buffer descriptors %d are not supported yet\n",
+ ch_count);
return -EINVAL;
+ }
work->need_invalidate_rkey =
(Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
+
+ if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
err = smb2_set_remote_key_for_rdma(work,
(struct smb2_buffer_desc_v1 *)
- &req->Buffer[0],
+ ((char *)req + ch_offset),
req->Channel,
req->ReadChannelInfoOffset,
req->ReadChannelInfoLength);
if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
- if (req->Length != 0 || req->DataOffset != 0)
- return -EINVAL;
+ unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
+
+ if (req->Length != 0 || req->DataOffset != 0 ||
+ ch_offset < offsetof(struct smb2_write_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
err = smb2_set_remote_key_for_rdma(work,
(struct smb2_buffer_desc_v1 *)
- &req->Buffer[0],
+ ((char *)req + ch_offset),
req->Channel,
req->WriteChannelInfoOffset,
req->WriteChannelInfoLength);
for (i = 0; i < 2; i++) {
struct kstat kstat;
struct ksmbd_kstat ksmbd_kstat;
+ struct dentry *dentry;
if (!dir->dot_dotdot[i]) { /* fill dot entry info */
if (i == 0) {
d_info->name = ".";
d_info->name_len = 1;
+ dentry = dir->filp->f_path.dentry;
} else {
d_info->name = "..";
d_info->name_len = 2;
+ dentry = dir->filp->f_path.dentry->d_parent;
}
if (!match_pattern(d_info->name, d_info->name_len,
ksmbd_kstat.kstat = &kstat;
ksmbd_vfs_fill_dentry_attrs(work,
user_ns,
- dir->filp->f_path.dentry->d_parent,
+ dentry,
&ksmbd_kstat);
rc = fn(conn, info_level, d_info, &ksmbd_kstat);
if (rc)
/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;
-static int smb_direct_max_read_write_size = 1048512;
+static int smb_direct_max_read_write_size = 524224;
static int smb_direct_max_outstanding_rw_ops = 8;
int last_entry_offset;
bool hide_dot_file;
int flags;
+ int last_entry_off_align;
};
struct ksmbd_readdir_data {
INIT_LIST_HEAD(&clp->cl_superblocks);
clp->cl_rpcclient = ERR_PTR(-EINVAL);
+ clp->cl_flags = cl_init->init_flags;
clp->cl_proto = cl_init->proto;
clp->cl_nconnect = cl_init->nconnect;
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- new->cl_flags = cl_init->init_flags;
return rpc_ops->init_client(new, cl_init);
}
ctx->dir_cookie = 0;
ctx->dup_cookie = 0;
ctx->page_index = 0;
+ ctx->eof = false;
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
unsigned int cache_entry_index;
signed char duped;
bool plus;
+ bool eob;
bool eof;
};
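
The two flags answer different questions: eob records that the caller's buffer filled up, eof that the directory itself ended. A hypothetical emit helper makes the distinction explicit:

/* dir_emit() returning false means "no room left in the user buffer",
 * which must not be confused with end-of-directory. */
static bool demo_emit_one(struct dir_context *ctx, const char *name,
			  int len, u64 ino, unsigned int type, bool *eob)
{
	if (!dir_emit(ctx, name, len, ino, type)) {
		*eob = true;	/* buffer full: resume here next time */
		return false;
	}
	return true;	/* eof is decided by the server, not here */
}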
status = nfs_readdir_page_filler(desc, entry, pages, pglen,
arrays, narrays);
- } while (!status && nfs_readdir_page_needs_filling(page));
+ } while (!status && nfs_readdir_page_needs_filling(page) &&
+ page_mapping(page));
nfs_readdir_free_pages(pages, array_size);
out:
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->name, ent->name_len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
- desc->eof = true;
+ desc->eob = true;
break;
}
memcpy(desc->verf, verf, sizeof(desc->verf));
desc->duped = 1;
}
if (array->page_is_eof)
- desc->eof = true;
+ desc->eof = !desc->eob;
kunmap(desc->page);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
goto out;
desc->page_index = 0;
+ desc->cache_entry_index = 0;
desc->last_cookie = desc->dir_cookie;
desc->duped = 0;
status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
- for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+ for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
desc->page = arrays[i];
nfs_do_filldir(desc, verf);
}
desc->duped = dir_ctx->duped;
page_index = dir_ctx->page_index;
desc->attr_gencount = dir_ctx->attr_gencount;
+ desc->eof = dir_ctx->eof;
memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
spin_unlock(&file->f_lock);
+ if (desc->eof) {
+ res = 0;
+ goto out_free;
+ }
+
if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
list_is_singular(&nfsi->open_files))
invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
nfs_do_filldir(desc, nfsi->cookieverf);
nfs_readdir_page_unlock_and_put_cached(desc);
- } while (!desc->eof);
+ } while (!desc->eob && !desc->eof);
spin_lock(&file->f_lock);
dir_ctx->dir_cookie = desc->dir_cookie;
dir_ctx->duped = desc->duped;
dir_ctx->attr_gencount = desc->attr_gencount;
dir_ctx->page_index = desc->page_index;
+ dir_ctx->eof = desc->eof;
memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
spin_unlock(&file->f_lock);
-
+out_free:
kfree(desc);
out:
if (offset == 0)
memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf));
dir_ctx->duped = 0;
+ dir_ctx->eof = false;
}
spin_unlock(&filp->f_lock);
return offset;
/**
* nfs4_proc_get_locations - discover locations for a migrated FSID
- * @inode: inode on FSID that is migrating
+ * @server: pointer to nfs_server to process
+ * @fhandle: pointer to the kernel NFS client file handle
* @locations: result of query
* @page: buffer
* @cred: credential to use for this operation
unsigned int len;
int v;
- argp->count = min_t(u32, argp->count, max_blocksize);
-
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
+ argp->count = min_t(u32, argp->count, max_blocksize);
+ if (argp->offset > (u64)OFFSET_MAX)
+ argp->offset = (u64)OFFSET_MAX;
+ if (argp->offset + argp->count > (u64)OFFSET_MAX)
+ argp->count = (u64)OFFSET_MAX - argp->offset;
+
v = 0;
len = argp->count;
resp->pages = rqstp->rq_next_page;
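
An illustrative walk-through of the clamping (values invented): a client sending offset = 1ULL << 63 with count = 4096 now gets a clean zero-byte eof read instead of an error:

/*
 * offset = 1ULL << 63 > OFFSET_MAX                -> offset = OFFSET_MAX
 * offset + count = OFFSET_MAX + 4096 > OFFSET_MAX -> count  = 0
 * The READ then proceeds normally and returns zero bytes, which the
 * client sees as reading at or beyond end-of-file.
 */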
(unsigned long long) argp->offset,
argp->stable? " stable" : "");
+ resp->status = nfserr_fbig;
+ if (argp->offset > (u64)OFFSET_MAX ||
+ argp->offset + argp->len > (u64)OFFSET_MAX)
+ return rpc_success;
+
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
argp->count,
(unsigned long long) argp->offset);
- if (argp->offset > NFS_OFFSET_MAX) {
- resp->status = nfserr_inval;
- goto out;
- }
-
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
argp->count, resp->verf);
-out:
return rpc_success;
}
if (xdr_stream_decode_u64(xdr, &newsize) < 0)
return false;
iap->ia_valid |= ATTR_SIZE;
- iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
+ iap->ia_size = newsize;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
- if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0)
+ if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
return false;
return true;
__be32 status;
read->rd_nf = NULL;
- if (read->rd_offset >= OFFSET_MAX)
- return nfserr_inval;
trace_nfsd_read_start(rqstp, &cstate->current_fh,
read->rd_offset, read->rd_length);
+ read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
+ if (read->rd_offset > (u64)OFFSET_MAX)
+ read->rd_offset = (u64)OFFSET_MAX;
+ if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
+ read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
+
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
unsigned long cnt;
int nvecs;
- if (write->wr_offset >= OFFSET_MAX)
- return nfserr_inval;
+ if (write->wr_offset > (u64)OFFSET_MAX ||
+ write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
+ return nfserr_fbig;
cnt = write->wr_buflen;
trace_nfsd_write_start(rqstp, &cstate->current_fh,
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
- p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
+ p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
}
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
return nfserr_resource;
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
count = maxcount;
eof = read->rd_offset >= i_size_read(file_inode(file));
DECLARE_EVENT_CLASS(nfsd_io_class,
TP_PROTO(struct svc_rqst *rqstp,
struct svc_fh *fhp,
- loff_t offset,
- unsigned long len),
+ u64 offset,
+ u32 len),
TP_ARGS(rqstp, fhp, offset, len),
TP_STRUCT__entry(
__field(u32, xid)
__field(u32, fh_hash)
- __field(loff_t, offset)
- __field(unsigned long, len)
+ __field(u64, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__entry->offset = offset;
__entry->len = len;
),
- TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu",
+ TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
__entry->xid, __entry->fh_hash,
__entry->offset, __entry->len)
)
DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
TP_PROTO(struct svc_rqst *rqstp, \
struct svc_fh *fhp, \
- loff_t offset, \
- unsigned long len), \
+ u64 offset, \
+ u32 len), \
TP_ARGS(rqstp, fhp, offset, len))
DEFINE_NFSD_IO_EVENT(read_start);
.ia_size = iap->ia_size,
};
+ host_err = -EFBIG;
+ if (iap->ia_size < 0)
+ goto out_unlock;
+
host_err = notify_change(&init_user_ns, dentry, &size_attr, NULL);
if (host_err)
goto out_unlock;
}
#ifdef CONFIG_NFSD_V3
-/*
- * Commit all pending writes to stable storage.
+/**
+ * nfsd_commit - Commit pending writes to stable storage
+ * @rqstp: RPC request being processed
+ * @fhp: NFS filehandle
+ * @offset: raw offset from beginning of file
+ * @count: raw count of bytes to sync
+ * @verf: filled in with the server's current write verifier
*
- * Note: we only guarantee that data that lies within the range specified
- * by the 'offset' and 'count' parameters will be synced.
+ * Note: we guarantee that data that lies within the range specified
+ * by the 'offset' and 'count' parameters will be synced. The server
+ * is permitted to sync data that lies outside this range at the
+ * same time.
*
* Unfortunately we cannot lock the file to make sure we return full WCC
* data to the client, as locking happens lower down in the filesystem.
+ *
+ * Return values:
+ * An nfsstat value in network byte order.
*/
__be32
-nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
- loff_t offset, unsigned long count, __be32 *verf)
+nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+ u32 count, __be32 *verf)
{
+ u64 maxbytes;
+ loff_t start, end;
struct nfsd_net *nn;
struct nfsd_file *nf;
- loff_t end = LLONG_MAX;
- __be32 err = nfserr_inval;
-
- if (offset < 0)
- goto out;
- if (count != 0) {
- end = offset + (loff_t)count - 1;
- if (end < offset)
- goto out;
- }
+ __be32 err;
err = nfsd_file_acquire(rqstp, fhp,
NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (err)
goto out;
+
+ /*
+ * Convert the client-provided (offset, count) range to a
+ * (start, end) range. If the client-provided range falls
+ * outside the maximum file size of the underlying FS,
+ * clamp the sync range appropriately.
+ */
+ start = 0;
+ end = LLONG_MAX;
+ maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
+ if (offset < maxbytes) {
+ start = offset;
+ if (count && (offset + count - 1 < maxbytes))
+ end = offset + count - 1;
+ }
+
nn = net_generic(nf->nf_net, nfsd_net_id);
if (EX_ISSYNC(fhp->fh_export)) {
errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
int err2;
- err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
switch (err2) {
case 0:
nfsd_copy_write_verifier(verf, nn);
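
For the (offset, count) to (start, end) conversion above, a count of 0 means
"commit to end of file", which is why end stays LLONG_MAX in that case. A
standalone sketch under the same rules (userspace C, hypothetical names):

	#include <stdint.h>

	/* clamp a COMMIT range against the filesystem's s_maxbytes */
	static void commit_range(uint64_t offset, uint32_t count,
				 uint64_t maxbytes,
				 int64_t *start, int64_t *end)
	{
		*start = 0;
		*end = INT64_MAX;	/* LLONG_MAX: sync to EOF */
		if (offset < maxbytes) {
			*start = offset;
			if (count && offset + count - 1 < maxbytes)
				*end = offset + count - 1;
		}
	}
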
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
u32 *verifier, bool *truncp, bool *created);
-__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long, __be32 *verf);
+__be32 nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
+ u64 offset, u32 count, __be32 *verf);
#endif /* CONFIG_NFSD_V3 */
#ifdef CONFIG_NFSD_V4
__be32 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
static void smaps_account(struct mem_size_stats *mss, struct page *page,
- bool compound, bool young, bool dirty, bool locked)
+ bool compound, bool young, bool dirty, bool locked,
+ bool migration)
{
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
* page_count(page) == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page mapped with PTE it would elevate
* page_count().
+ *
+ * The page_mapcount() is called to get a snapshot of the mapcount.
+ * Without holding the page lock this snapshot can be slightly wrong as
+ * we cannot always read the mapcount atomically. It is not safe to
+ * call page_mapcount() even with PTL held if the page is not mapped,
+ * especially for migration entries. Treat regular migration entries
+ * as mapcount == 1.
*/
- if (page_count(page) == 1) {
+ if ((page_count(page) == 1) || migration) {
smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
locked, true);
return;
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pte_present(*pte)) {
page = vm_normal_page(vma, addr, *pte);
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_pfn_swap_entry(swpent))
+ } else if (is_pfn_swap_entry(swpent)) {
+ if (is_migration_entry(swpent))
+ migration = true;
page = pfn_swap_entry_to_page(swpent);
+ }
} else {
smaps_pte_hole_lookup(addr, walk);
return;
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+ locked, migration);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pmd_present(*pmd)) {
/* FOLL_DUMP will return -EFAULT on huge zero page */
} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- if (is_migration_entry(entry))
+ if (is_migration_entry(entry)) {
+ migration = true;
page = pfn_swap_entry_to_page(entry);
+ }
}
if (IS_ERR_OR_NULL(page))
return;
/* pass */;
else
mss->file_thp += HPAGE_PMD_SIZE;
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
+ locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
{
u64 frame = 0, flags = 0;
struct page *page = NULL;
+ bool migration = false;
if (pte_present(pte)) {
if (pm->show_pfn)
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
+ migration = is_migration_entry(entry);
if (is_pfn_swap_entry(entry))
page = pfn_swap_entry_to_page(entry);
}
if (page && !PageAnon(page))
flags |= PM_FILE;
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
spinlock_t *ptl;
pte_t *pte, *orig_pte;
int err = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool migration = false;
+
ptl = pmd_trans_huge_lock(pmdp, vma);
if (ptl) {
u64 flags = 0, frame = 0;
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
VM_BUG_ON(!is_pmd_migration_entry(pmd));
+ migration = is_migration_entry(entry);
page = pfn_swap_entry_to_page(entry);
}
#endif
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
for (; addr != end; addr += PAGE_SIZE) {
	/* This is not very clever (nor fast) but currently I don't know about
	 * any other simple way of getting quota data to disk and we must get
	 * them there for them to be visible to userspace... */
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
- sync_blockdev(sb->s_bdev);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
+ ret = sync_blockdev(sb->s_bdev);
+ if (ret)
+ return ret;
/*
* Now when everything is written we can discard the pagecache so
percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}
-static void sb_freeze_unlock(struct super_block *sb)
+static void sb_freeze_unlock(struct super_block *sb, int level)
{
- int level;
-
- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+ for (level--; level >= 0; level--)
percpu_up_write(sb->s_writers.rw_sem + level);
}
sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
/* All writers are done so after syncing there won't be dirty data */
- sync_filesystem(sb);
+ ret = sync_filesystem(sb);
+ if (ret) {
+ sb->s_writers.frozen = SB_UNFROZEN;
+ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
+ wake_up(&sb->s_writers.wait_unfrozen);
+ deactivate_locked_super(sb);
+ return ret;
+ }
/* Now wait for internal filesystem counter */
sb->s_writers.frozen = SB_FREEZE_FS;
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
return ret;
}
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
*/
int sync_filesystem(struct super_block *sb)
{
- int ret;
+ int ret = 0;
/*
* We need to be protected against the filesystem going from
* at a time.
*/
writeback_inodes_sb(sb, WB_REASON_SYNC);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 0);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 0);
+ if (ret)
+ return ret;
+ }
ret = sync_blockdev_nowait(sb->s_bdev);
- if (ret < 0)
+ if (ret)
return ret;
sync_inodes_sb(sb);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
return sync_blockdev(sb->s_bdev);
}
EXPORT_SYMBOL(sync_filesystem);
memalloc_nofs_restore(nofs_flag);
}
-/* Finish all pending io completions. */
+/*
+ * Finish all pending IO completions that require transactional modifications.
+ *
+ * We try to merge physical and logically contiguous ioends before completion to
+ * minimise the number of transactions we need to perform during IO completion.
+ * Both unwritten extent conversion and COW remapping need to iterate and modify
+ * one physical extent at a time, so we gain nothing by merging physically
+ * discontiguous extents here.
+ *
+ * The ioend chain length that we can be processing here is largely unbound in
+ * length and we may have to perform significant amounts of work on each ioend
+ * to complete it. Hence we have to be careful about holding the CPU for too
+ * long in this loop.
+ */
void
xfs_end_io(
struct work_struct *work)
list_del_init(&ioend->io_list);
iomap_ioend_try_merge(ioend, &tmp);
xfs_end_ioend(ioend);
+ cond_resched();
}
}
rblocks = 0;
}
- /*
- * Allocate and setup the transaction.
- */
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
dblocks, rblocks, false, &tp);
if (error)
if (error)
goto error;
- /*
- * Complete the transaction
- */
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
return !((pos | len) & mask);
}
-int
-xfs_update_prealloc_flags(
- struct xfs_inode *ip,
- enum xfs_prealloc_flags flags)
-{
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
- 0, 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if (!(flags & XFS_PREALLOC_INVISIBLE)) {
- VFS_I(ip)->i_mode &= ~S_ISUID;
- if (VFS_I(ip)->i_mode & S_IXGRP)
- VFS_I(ip)->i_mode &= ~S_ISGID;
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
-
- if (flags & XFS_PREALLOC_SET)
- ip->i_diflags |= XFS_DIFLAG_PREALLOC;
- if (flags & XFS_PREALLOC_CLEAR)
- ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (flags & XFS_PREALLOC_SYNC)
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
/*
* Fsync operations on directories are much simpler than on regular files,
* as there is no file data to flush, and thus also no need for explicit
return error;
}
+/* Does this file, inode, or mount want synchronous writes? */
+static inline bool xfs_file_sync_writes(struct file *filp)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(filp));
+
+ if (xfs_has_wsync(ip->i_mount))
+ return true;
+ if (filp->f_flags & (__O_SYNC | O_DSYNC))
+ return true;
+ if (IS_SYNC(file_inode(filp)))
+ return true;
+
+ return false;
+}
+
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
struct inode *inode = file_inode(file);
struct xfs_inode *ip = XFS_I(inode);
long error;
- enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
loff_t new_size = 0;
bool do_file_insert = false;
goto out_unlock;
}
+ error = file_modified(file);
+ if (error)
+ goto out_unlock;
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
}
do_file_insert = true;
} else {
- flags |= XFS_PREALLOC_SET;
-
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
}
}
- if (file->f_flags & O_DSYNC)
- flags |= XFS_PREALLOC_SYNC;
-
- error = xfs_update_prealloc_flags(ip, flags);
- if (error)
- goto out_unlock;
-
/* Change file size if needed */
if (new_size) {
struct iattr iattr;
* leave shifted extents past EOF and hence losing access to
* the data that is contained within them.
*/
- if (do_file_insert)
+ if (do_file_insert) {
error = xfs_insert_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
+
+ if (xfs_file_sync_writes(file))
+ error = xfs_log_force_inode(ip);
out_unlock:
xfs_iunlock(ip, iolock);
return ret;
}
-/* Does this file, inode, or mount want synchronous writes? */
-static inline bool xfs_file_sync_writes(struct file *filp)
-{
- struct xfs_inode *ip = XFS_I(file_inode(filp));
-
- if (xfs_has_wsync(ip->i_mount))
- return true;
- if (filp->f_flags & (__O_SYNC | O_DSYNC))
- return true;
- if (IS_SYNC(file_inode(filp)))
- return true;
-
- return false;
-}
-
STATIC loff_t
xfs_file_remap_range(
struct file *file_in,
}
/* from xfs_file.c */
-enum xfs_prealloc_flags {
- XFS_PREALLOC_SET = (1 << 1),
- XFS_PREALLOC_CLEAR = (1 << 2),
- XFS_PREALLOC_SYNC = (1 << 3),
- XFS_PREALLOC_INVISIBLE = (1 << 4),
-};
-
-int xfs_update_prealloc_flags(struct xfs_inode *ip,
- enum xfs_prealloc_flags flags);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
if (bmx.bmv_count < 2)
return -EINVAL;
- if (bmx.bmv_count > ULONG_MAX / recsize)
+ if (bmx.bmv_count >= INT_MAX / recsize)
return -ENOMEM;
buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
return 0;
}
+/*
+ * We cannot use file based VFS helpers such as file_modified() to update
+ * inode state as we modify the data/metadata in the inode here. Hence we have
+ * to open code the timestamp updates and SUID/SGID stripping. We also need
+ * to set the inode prealloc flag to ensure that the extents we allocate are not
+ * removed if the inode is reclaimed from memory before xfs_fs_commit_blocks()
+ * is called from the client to indicate that data has been written and the
+ * file size can be extended.
+ */
+static int
+xfs_fs_map_update_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
+ 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ VFS_I(ip)->i_mode &= ~S_ISUID;
+ if (VFS_I(ip)->i_mode & S_IXGRP)
+ VFS_I(ip)->i_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp);
+}
+
/*
* Get a layout for the pNFS client.
*/
* that the blocks allocated and handed out to the client are
* guaranteed to be present even after a server crash.
*/
- error = xfs_update_prealloc_flags(ip,
- XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
+ error = xfs_fs_map_update_inode(ip);
+ if (!error)
+ error = xfs_log_force_inode(ip);
if (error)
goto out_unlock;
+
} else {
xfs_iunlock(ip, lock_flags);
}
length = end - start;
if (!length)
continue;
-
+
/*
* Make sure reads through the pagecache see the new data.
*/
int wait)
{
struct xfs_mount *mp = XFS_M(sb);
+ int error;
trace_xfs_fs_sync_fs(mp, __return_address);
if (!wait)
return 0;
- xfs_log_force(mp, XFS_LOG_SYNC);
+ error = xfs_log_force(mp, XFS_LOG_SYNC);
+ if (error)
+ return error;
+
if (laptop_mode) {
/*
* The disk must be active because we're syncing.
state->f[0] = -1;
}
-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
/* Helper functions for BLAKE2s shared by the library and shash APIs */
-static inline void __blake2s_update(struct blake2s_state *state,
- const u8 *in, size_t inlen,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
+ bool force_generic)
{
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
return;
if (inlen > fill) {
memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
state->buflen = 0;
in += fill;
inlen -= fill;
if (inlen > BLAKE2S_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
/* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
}
state->buflen += inlen;
}
-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
{
blake2s_set_lastblock(state);
memset(state->buf + state->buflen, 0,
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1, state->buflen);
+ else
+ blake2s_compress(state, state->buf, 1, state->buflen);
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
memcpy(out, state->h, state->outlen);
}
static inline int crypto_blake2s_update(struct shash_desc *desc,
const u8 *in, unsigned int inlen,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_update(state, in, inlen, compress);
+ __blake2s_update(state, in, inlen, force_generic);
return 0;
}
static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_final(state, out, compress);
+ __blake2s_final(state, out, force_generic);
return 0;
}
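
With force_generic a compile-time constant at every call site, the branches
above fold away, so neither path pays the indirect-call cost of the old
function pointer. A sketch of how the generic shash glue is expected to use
these helpers (wrapper names are illustrative):

	static int blake2s_update_generic(struct shash_desc *desc,
					  const u8 *in, unsigned int inlen)
	{
		return crypto_blake2s_update(desc, in, inlen, true);
	}

	static int blake2s_final_generic(struct shash_desc *desc, u8 *out)
	{
		return crypto_blake2s_final(desc, out, true);
	}
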
#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-/* iva clocks */
-#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
/* dss clocks */
#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
/* l3init clocks */
#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
static inline void check_and_init_map_value(struct bpf_map *map, void *dst)
{
if (unlikely(map_value_has_spin_lock(map)))
- *(struct bpf_spin_lock *)(dst + map->spin_lock_off) =
- (struct bpf_spin_lock){};
+ memset(dst + map->spin_lock_off, 0, sizeof(struct bpf_spin_lock));
if (unlikely(map_value_has_timer(map)))
- *(struct bpf_timer *)(dst + map->timer_off) =
- (struct bpf_timer){};
+ memset(dst + map->timer_off, 0, sizeof(struct bpf_timer));
}
/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */
if (unlikely(map_value_has_spin_lock(map))) {
s_off = map->spin_lock_off;
s_sz = sizeof(struct bpf_spin_lock);
- } else if (unlikely(map_value_has_timer(map))) {
+ }
+ if (unlikely(map_value_has_timer(map))) {
t_off = map->timer_off;
t_sz = sizeof(struct bpf_timer);
}
#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
struct ceph_gcm_nonce in_gcm_nonce;
struct ceph_gcm_nonce out_gcm_nonce;
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
struct page **out_enc_pages;
int out_enc_page_cnt;
int out_enc_resid;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
*/
#define __stringify_label(n) #n
-#define __annotate_reachable(c) ({ \
- asm volatile(__stringify_label(c) ":\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
-})
-#define annotate_reachable() __annotate_reachable(__COUNTER__)
-
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b - .\n\t" \
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else
-#define annotate_reachable()
#define annotate_unreachable()
+# define ASM_REACHABLE
#define __annotate_jump_table
#endif
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
#ifndef unreachable
# define unreachable() do { \
annotate_unreachable(); \
#endif
}
-static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
+static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#ifdef __BIG_ENDIAN
 * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
*/
-static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
- const u8 addr2[6+2])
+static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
- /* Copy data from area to another. Obsolete. */
+	/* Copy data from one area to another */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
/* Draws a image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
struct vmbus_channel *channel;
struct kset *channels_kset;
struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
static inline u8
ieee80211_he_oper_size(const u8 *he_oper_ie)
{
- struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ const struct ieee80211_he_operation *he_oper = (const void *)he_oper_ie;
u8 oper_len = sizeof(struct ieee80211_he_operation);
u32 he_oper_params;
static inline const struct ieee80211_he_6ghz_oper *
ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
{
- const u8 *ret = (void *)&he_oper->optional;
+ const u8 *ret = (const void *)&he_oper->optional;
u32 he_oper_params;
if (!he_oper)
if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
ret++;
- return (void *)ret;
+ return (const void *)ret;
}
/* HE Spatial Reuse defines */
static inline u8
ieee80211_he_spr_size(const u8 *he_spr_ie)
{
- struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
+ const struct ieee80211_he_spr *he_spr = (const void *)he_spr_ie;
u8 spr_len = sizeof(struct ieee80211_he_spr);
u8 he_spr_params;
struct list_head io_list; /* next ioend in chain */
u16 io_type;
u16 io_flags; /* IOMAP_F_* */
+ u32 io_folios; /* folios added to ioend */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
+ sector_t io_sector; /* start sector of ioend */
struct bio *io_bio; /* bio being built */
struct bio io_inline_bio; /* MUST BE LAST! */
};
*/
unsigned long t_log_start;
- /*
+ /*
* Number of buffers on the t_buffers list [j_list_lock, no locks
* needed for jbd2 thread]
*/
* Clean-up after fast commit or full commit. JBD2 calls this function
* after every commit operation.
*/
- void (*j_fc_cleanup_callback)(struct journal_s *journal, int);
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
/**
* @j_fc_replay_callback:
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
#include <linux/atomic.h>
#include <linux/static_key.h>
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
+#include <linux/ftrace.h>
#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
u64 last_used_slot_gen;
};
-/* must be called with irqs disabled */
-static __always_inline void guest_enter_irqoff(void)
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
{
/*
	 * This is running in ioctl context so it's safe to assume that it's the
instrumentation_begin();
vtime_account_guest_enter();
instrumentation_end();
+}
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
/*
* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
}
}
-static __always_inline void guest_exit_irqoff(void)
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
{
context_tracking_guest_exit();
+}
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
instrumentation_begin();
/* Flush the guest cputime we spent on the guest */
vtime_account_guest_exit();
instrumentation_end();
}
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
static inline void guest_exit(void)
{
unsigned long flags;
local_irq_restore(flags);
}
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
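
Taken together, the kernel-doc above pins down a strict ordering for
architecture code. A condensed sketch of a vcpu run loop using the new pairs
(illustrative only, not from the patch):

	local_irq_disable();

	guest_timing_enter_irqoff();	/* begin guest time accounting */
	guest_state_enter_irqoff();	/* lockdep/RCU fixup; non-instrumentable after this */

	/* ... architecture-specific hardware guest entry and exit ... */

	guest_state_exit_irqoff();	/* re-establish host lockdep/RCU state */
	guest_timing_exit_irqoff();	/* flush guest cputime */

	local_irq_enable();
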
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
struct mem_cgroup *memcg;
atomic_t nr_charged_bytes;
union {
- struct list_head list;
+ struct list_head list; /* protected by objcg_lock */
struct rcu_head rcu;
};
};
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
struct obj_cgroup __rcu *objcg;
- struct list_head objcg_list; /* list of inherited objcgs */
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
MEMCG_PADDING(_pad2_);
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
-int netif_rx_ni(struct sk_buff *skb);
-int netif_rx_any_context(struct sk_buff *skb);
+int __netif_rx(struct sk_buff *skb);
+
+static inline int netif_rx_ni(struct sk_buff *skb)
+{
+ return netif_rx(skb);
+}
+
+static inline int netif_rx_any_context(struct sk_buff *skb)
+{
+ return netif_rx(skb);
+}
+
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list_internal(struct list_head *head);
int (*prepare_write)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size,
bool no_space_allocated_yet);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
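
A sketch of how a caller might walk the cache with the new op, assuming the
usual 0-on-success/-errno convention (variable names are illustrative):

	loff_t pos = start, data_start;
	size_t data_len;

	while (pos < start + len &&
	       cres->ops->query_occupancy(cres, pos, start + len - pos,
					  granularity, &data_start,
					  &data_len) == 0) {
		/* [data_start, data_start + data_len) is cached */
		pos = data_start + data_len;
	}
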
struct readahead_control;
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
__u64 dup_cookie;
pgoff_t page_index;
signed char duped;
+ bool eof;
};
/*
pmd_t *pmdp, pmd_t pmd);
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
__page_table_check_pud_set(mm, addr, pudp, pud);
}
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
#else
static inline void page_table_check_alloc(struct page *page, unsigned int order)
{
}
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
#endif /* CONFIG_PAGE_TABLE_CHECK */
#endif /* __LINUX_PAGE_TABLE_CHECK_H */
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
+#define pte_index pte_index
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
#define SOL_XDP 283
#define SOL_MPTCP 284
#define SOL_MCTP 285
+#define SOL_SMC 286
/* IPX options */
#define IPX_TYPE 1
struct ssb_gige *dev = pdev_to_ssb_gige(pdev);
if (dev)
return (dev->dev->bus->chip_id == 0x4785);
- return 0;
+ return false;
}
/* Get the device MAC address */
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
bool is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
+ bool (*smc_hs_congested)(const struct sock *sk);
bool syn_smc; /* SYN includes SMC */
#endif
#ifdef CONFIG_WWAN_DEBUGFS
struct dentry *wwan_get_debugfs_dir(struct device *parent);
+void wwan_put_debugfs_dir(struct dentry *dir);
#else
static inline struct dentry *wwan_get_debugfs_dir(struct device *parent)
{
return ERR_PTR(-ENODEV);
}
+static inline void wwan_put_debugfs_dir(struct dentry *dir) {}
#endif
#endif /* __WWAN_H */
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+	atomic_t agg_select_timer;	/* Timer to select aggregator after all adapters' handshakes */
u16 aggregator_identifier;
};
*/
static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
{
- return (void *)ieee80211_bss_get_elem(bss, id);
+ return (const void *)ieee80211_bss_get_elem(bss, id);
}
(!match_len && match_offset)))
return NULL;
- return (void *)cfg80211_find_elem_match(eid, ies, len,
- match, match_len,
- match_offset ?
+ return (const void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
match_offset - 2 : 0);
}
cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
const u8 *ies, unsigned int len)
{
- return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+ return (const void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
}
/**
struct mutex addr_lists_lock;
struct list_head fdbs;
struct list_head mdbs;
+
+ /* List of VLANs that CPU and DSA ports are members of. */
+ struct mutex vlans_lock;
+ struct list_head vlans;
};
/* TODO: ideally DSA ports would have a single dp->link_dp member,
struct list_head list;
};
+struct dsa_vlan {
+ u16 vid;
+ refcount_t refcount;
+ struct list_head list;
+};
+
struct dsa_switch {
struct device *dev;
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
skb_dst_drop(skb);
- dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
/*
* Copyright (c) 2017 Intel Deutschland GmbH
- * Copyright (c) 2018-2019 Intel Corporation
+ * Copyright (c) 2018-2019, 2021 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
*/
static inline u16 ieee80211_get_radiotap_len(const char *data)
{
- struct ieee80211_radiotap_header *hdr = (void *)data;
+ const struct ieee80211_radiotap_header *hdr = (const void *)data;
return get_unaligned_le16(&hdr->it_len);
}
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- offload_failed:1,
- unused:1;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
__u32 fib_rt_cache; /* cached rt entries in exception table */
__u32 fib_discarded_routes; /* total number of routes delete */
- /* The following stats are not protected by any lock */
+ /* The following stat is not protected by any lock */
atomic_t fib_rt_alloc; /* total number of routes alloced */
- atomic_t fib_rt_uncache; /* rt entries in uncached list */
};
#define RTN_TL_ROOT 0x0001
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2021 Intel Corporation
+ * Copyright (C) 2018 - 2022 Intel Corporation
*/
#ifndef MAC80211_H
IEEE80211_TX_CTRL_DONT_REORDER = BIT(8),
};
+/**
+ * enum mac80211_tx_status_flags - flags to describe transmit status
+ *
+ * @IEEE80211_TX_STATUS_ACK_SIGNAL_VALID: ACK signal is valid
+ *
+ * These flags are used in tx_info->status.flags.
+ */
+enum mac80211_tx_status_flags {
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID = BIT(0),
+};
+
/*
* This definition is used as a mask to clear all temporary flags, which are
* set by the tx handlers for each transmission attempt by the mac80211 stack.
* @status.antenna: (legacy, kept only for iwlegacy)
* @status.tx_time: airtime consumed for transmission; note this is only
* used for WMM AC, not for airtime fairness
- * @status.is_valid_ack_signal: ACK signal is valid
+ * @status.flags: status flags, see &enum mac80211_tx_status_flags
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
* @ack.cookie: cookie for the ACK
u8 ampdu_len;
u8 antenna;
u16 tx_time;
- bool is_valid_ack_signal;
- void *status_driver_data[19 / sizeof(void *)];
+ u8 flags;
+ void *status_driver_data[18 / sizeof(void *)];
} status;
struct {
struct ieee80211_tx_rate driver_rates[
return false;
}
-static inline void mptcp_parse_option(const struct sk_buff *skb,
- const unsigned char *ptr, int opsize,
- struct tcp_options_received *opt_rx)
-{
-}
-
static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
unsigned int *size,
struct mptcp_out_options *opts)
spinlock_t fib6_gc_lock;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
/* protect fback_rsn */
struct mutex mutex_fback_rsn;
struct smc_stats_rsn *fback_rsn;
+
+ bool limit_smc_hs; /* constraint on handshake */
};
#endif
struct switchdev_obj obj;
u16 flags;
u16 vid;
+ /* If set, the notifier signifies a change of one of the following
+ * flags for a VLAN that already exists:
+ * - BRIDGE_VLAN_INFO_PVID
+ * - BRIDGE_VLAN_INFO_UNTAGGED
+ * Entries with BRIDGE_VLAN_INFO_BRENTRY unset are not notified at all.
+ */
+ bool changed;
};
#define SWITCHDEV_OBJ_PORT_VLAN(OBJ) \
int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack));
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack));
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj));
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj));
int switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
return 0;
}
+static inline int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ return 0;
+}
+
static inline int
switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
return 0;
}
+static inline int
+switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ return 0;
+}
+
static inline int
switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
#define REG_RESERVED_ADDR 0xffffffff
#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
-#define OCELOT_MRP_CPUQ 7
-
enum ocelot_target {
ANA = 1,
QS,
char name[ETH_GSTRING_LEN];
};
+struct ocelot_stats_region {
+ struct list_head node;
+ u32 offset;
+ int count;
+ u32 *buf;
+};
+
enum ocelot_tag_prefix {
OCELOT_TAG_PREFIX_DISABLED = 0,
OCELOT_TAG_PREFIX_NONE,
struct regmap_field *regfields[REGFIELD_MAX];
const u32 *const *map;
const struct ocelot_stat_layout *stats_layout;
+ struct list_head stats_regions;
unsigned int num_stats;
u32 pool_size[OCELOT_SB_NUM][OCELOT_SB_POOL_NUM];
u8 base_mac[ETH_ALEN];
struct list_head vlans;
+ struct list_head traps;
/* Switches like VSC9959 have flooding per traffic class */
int num_flooding_pgids;
u32 burst; /* bytes */
};
-#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
-#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
-#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
-
-#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
-#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_bulk_read_rix(ocelot, reg, ri, buf, count) \
+ __ocelot_bulk_read_ix(ocelot, reg, reg##_RSZ * (ri), buf, count)
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) \
+ __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) \
+ __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) \
+ __ocelot_read_ix(ocelot, reg, 0)
+
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) \
+ __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
-#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
-#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
-#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) \
+ __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
-#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
-#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
-#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_field_write(ocelot, reg, val) \
+ regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) \
+ regmap_field_read((ocelot)->regfields[(reg)], (val))
+#define ocelot_fields_write(ocelot, id, reg, val) \
+ regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_fields_read(ocelot, id, reg, val) \
+ regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
#define ocelot_target_read_ix(ocelot, target, reg, gi, ri) \
__ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
+int __ocelot_bulk_read_ix(struct ocelot *ocelot, u32 reg, u32 offset, void *buf,
+ int count);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
#include <soc/mscc/ocelot.h>
+/* Cookie definitions for private VCAP filters installed by the driver.
+ * Must be unique per VCAP block.
+ */
+#define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port)
+#define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port))
+#define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2)
+#define OCELOT_VCAP_IS2_L2_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 1)
+#define OCELOT_VCAP_IS2_IPV4_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 2)
+#define OCELOT_VCAP_IS2_IPV4_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 3)
+#define OCELOT_VCAP_IS2_IPV6_GEN_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 4)
+#define OCELOT_VCAP_IS2_IPV6_EV_PTP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2 + 5)
+
/* =================================================================
* VCAP Common
* =================================================================
struct ocelot_vcap_filter {
struct list_head list;
+ struct list_head trap_list;
enum ocelot_vcap_filter_type type;
int block_id;
struct ocelot_vcap_action action;
struct ocelot_vcap_stats stats;
/* For VCAP IS1 and IS2 */
+ bool take_ts;
unsigned long ingress_port_mask;
/* For VCAP ES0 */
struct ocelot_vcap_port ingress_port;
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
+/**
+ * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
+ * the single-depth lockdep subclass.
+ */
+#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
+ } while (0)
+
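
Usage sketch for the nested variant, for the case where two substreams must
be held at once (substream names are illustrative):

	unsigned long flags, flags2;

	snd_pcm_stream_lock_irqsave(substream, flags);
	snd_pcm_stream_lock_irqsave_nested(other, flags2);
	/* ... operate on both substreams ... */
	snd_pcm_stream_unlock_irqrestore(other, flags2);
	snd_pcm_stream_unlock_irqrestore(substream, flags);
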
/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator
TP_ARGS(skb)
);
-DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
-
- TP_PROTO(const struct sk_buff *skb),
-
- TP_ARGS(skb)
-);
-
DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
TP_PROTO(int ret),
TP_ARGS(ret)
);
-DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
-
- TP_PROTO(int ret),
-
- TP_ARGS(ret)
-);
-
DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
TP_PROTO(int ret),
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-/* Available with KVM_CAP_XSAVE2 */
-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
#endif /* __LINUX_KVM_H */
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
- /* Conntrack got a helper explicitly attached via CT target. */
+ /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+	 * Note that siginfo_t::si_perf_data is long-sized, and sig_data will
+	 * be truncated accordingly on 32-bit architectures.
*/
__u64 sig_data;
};
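On the consumer side, sig_data comes back through the SIGTRAP siginfo. A hedged userspace sketch, assuming a libc/uapi combination that exposes si_perf_data (available since the sigtrap perf support landed):

#include <signal.h>

static volatile sig_atomic_t got_trap;
static volatile unsigned long last_cookie;

/* SIGTRAP handler for a perf event opened with attr.sigtrap = 1. */
static void on_sigtrap(int sig, siginfo_t *info, void *uctx)
{
	/* On 32-bit, this is the low half of attr.sig_data, per the
	 * truncation note above. */
	last_cookie = info->si_perf_data;
	got_trap = 1;
}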
SMC_NETLINK_DUMP_SEID,
SMC_NETLINK_ENABLE_SEID,
SMC_NETLINK_DISABLE_SEID,
+ SMC_NETLINK_DUMP_HS_LIMITATION,
+ SMC_NETLINK_ENABLE_HS_LIMITATION,
+ SMC_NETLINK_DISABLE_HS_LIMITATION,
};
/* SMC_GENL_FAMILY top level attributes */
__SMC_NLA_SEID_TABLE_MAX,
SMC_NLA_SEID_TABLE_MAX = __SMC_NLA_SEID_TABLE_MAX - 1
};
+
+/* SMC_NETLINK_HS_LIMITATION attributes */
+enum {
+ SMC_NLA_HS_LIMITATION_UNSPEC,
+ SMC_NLA_HS_LIMITATION_ENABLED, /* u8 */
+ __SMC_NLA_HS_LIMITATION_MAX,
+ SMC_NLA_HS_LIMITATION_MAX = __SMC_NLA_HS_LIMITATION_MAX - 1
+};
+
+/* SMC socket options */
+#define SMC_LIMIT_HS 1 /* constraint on smc handshake */
+
#endif /* _UAPI_LINUX_SMC_H */
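A hedged userspace sketch of the new socket option. The SOL_SMC level value and the exact server-side semantics (constraining concurrent SMC handshakes so connections fall back rather than queue) are assumptions inferred from the names above:

#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_SMC
#define SOL_SMC 286	/* assumed value, normally from <linux/socket.h> */
#endif
#define SMC_LIMIT_HS 1	/* constraint on smc handshake */

static void smc_limit_handshakes(int listen_fd)
{
	int one = 1;

	/* Ask the kernel to limit pending SMC handshakes on this
	 * listen socket. */
	if (setsockopt(listen_fd, SOL_SMC, SMC_LIMIT_HS, &one, sizeof(one)))
		perror("setsockopt(SMC_LIMIT_HS)");
}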
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
/*
* Inserts the grant references into the mapping table of an instance
* of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
+ * until mmap() is called with @index as the offset. @index should be
+ * considered opaque to userspace, with one exception: if no grant
+ * references have ever been inserted into the mapping table of this
+ * instance, @index will be set to 0. This is necessary to use gntdev
+ * with userspace APIs that expect a file descriptor that can be
+ * mmap()'d at offset 0, such as Wayland. If @count is set to 0, this
+ * ioctl will fail.
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
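A hedged userspace sketch of the offset-0 guarantee documented above: the first mapping in a fresh gntdev instance gets @index 0, so the fd can be handed to APIs that insist on mmap() at offset 0. The header include path and the one-page mapping size are assumptions for illustration.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>	/* assumed install path of the uapi header */

/* Map one grant from @domid/@gref and mmap it at the returned index. */
static void *map_one_grant(uint32_t domid, uint32_t gref)
{
	struct ioctl_gntdev_map_grant_ref op;
	int fd = open("/dev/xen/gntdev", O_RDWR);

	if (fd < 0)
		return NULL;

	memset(&op, 0, sizeof(op));
	op.count = 1;		/* count == 0 would now fail, per above */
	op.refs[0].domid = domid;
	op.refs[0].ref = gref;
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op))
		return NULL;

	/* First insertion in this instance: op.index is 0, so the fd
	 * could also be passed to e.g. a Wayland compositor. */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, op.index);
}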
/******************************************************************************
- * evtchn.h
- *
* Interface to /dev/xen/xenbus_backend.
*
* Copyright (c) 2011 Bastian Blank <waldi@debian.org>
*/
un = lookup_undo(ulp, semid);
if (un) {
+ spin_unlock(&ulp->lock);
kvfree(new);
goto success;
}
ipc_assert_locked_object(&sma->sem_perm);
list_add(&new->list_id, &sma->list_id);
un = new;
-
-success:
spin_unlock(&ulp->lock);
+success:
sem_unlock(sma, -1);
out:
return un;
case AUDITSC_EXECVE:
return mask & AUDIT_PERM_EXEC;
case AUDITSC_OPENAT2:
- return mask & ACC_MODE((u32)((struct open_how *)ctx->argv[2])->flags);
+ return mask & ACC_MODE((u32)ctx->openat2.flags);
default:
return 0;
}
}
if (check_ptr_off_reg(env, reg, regno))
return -EINVAL;
- } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
+ } else if (is_kfunc && (reg->type == PTR_TO_BTF_ID ||
+ (reg2btf_ids[base_type(reg->type)] && !type_flag(reg->type)))) {
const struct btf_type *reg_ref_t;
const struct btf *reg_btf;
const char *reg_ref_tname;
}
} else {
reg_btf = btf_vmlinux;
- reg_ref_id = *reg2btf_ids[reg->type];
+ reg_ref_id = *reg2btf_ids[base_type(reg->type)];
}
reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id,
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*/
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/bpf-cgroup.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
void *key;
u32 idx;
+ BTF_TYPE_EMIT(struct bpf_timer);
callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held());
if (!callback_fn)
goto out;
maybe_wait_bpf_programs(map);
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
err = -EFAULT;
if (err)
break;
+ cond_resched();
}
if (copy_to_user(&uattr->batch.count, &cp, sizeof(cp)))
swap(prev_key, key);
retry = MAP_LOOKUP_RETRIES;
cp++;
+ cond_resched();
}
if (err == -EFAULT)
*/
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
- struct perf_cpu_context *cpuctx;
+ struct perf_cpu_context *cpuctx, *tmp;
struct list_head *list;
unsigned long flags;
local_irq_save(flags);
list = this_cpu_ptr(&cgrp_cpuctx_list);
- list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
+ list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
return err;
}
+/*
+ * Copy event-type-independent attributes that may be modified.
+ */
+static void perf_event_modify_copy_attr(struct perf_event_attr *to,
+ const struct perf_event_attr *from)
+{
+ to->sig_data = from->sig_data;
+}
+
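From userspace, the attribute copied above is reached through the existing modify ioctl. A hedged sketch: refresh only the sigtrap cookie on a live event, so later SIGTRAPs carry the new si_perf_data value.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/* @attr must be the same attr the event was opened with, apart from
 * the fields being modified. */
static int update_sig_data(int perf_fd, struct perf_event_attr *attr,
			   __u64 new_cookie)
{
	attr->sig_data = new_cookie;
	return ioctl(perf_fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, attr);
}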
static int perf_event_modify_attr(struct perf_event *event,
struct perf_event_attr *attr)
{
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
+ /*
+ * Event-type-independent attributes must be copied before event-type
+ * modification, which will validate that final attributes match the
+ * source attributes after all relevant attributes have been copied.
+ */
+ perf_event_modify_copy_attr(&event->attr, attr);
err = func(event, attr);
if (err)
goto out;
list_for_each_entry(child, &event->child_list, child_list) {
+ perf_event_modify_copy_attr(&child->attr, attr);
err = func(child, attr);
if (err)
goto out;
info->max_pages = info->used_pages = 0;
}
+#ifdef CONFIG_SYSFS
static ssize_t compression_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return 0;
}
late_initcall(module_decompress_sysfs_init);
+#endif
struct kobj_attribute *attr,
char *buf)
{
- return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
+ if (!pm_wakeup_irq())
+ return -ENODATA;
+
+ return sprintf(buf, "%u\n", pm_wakeup_irq());
}
power_attr_ro(pm_wakeup_irq);
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
- pm_wakeup_clear(true);
+ pm_wakeup_clear(0);
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
break;
}
- pm_wakeup_clear(false);
-
s2idle_enter();
}
}
#endif
-static void set_load_weight(struct task_struct *p, bool update_load)
+static void set_load_weight(struct task_struct *p)
{
+ bool update_load = !(READ_ONCE(p->__state) & TASK_NEW);
int prio = p->static_prio - MAX_RT_PRIO;
struct load_weight *load = &p->se.load;
p->static_prio = NICE_TO_PRIO(0);
p->prio = p->normal_prio = p->static_prio;
- set_load_weight(p, false);
+ set_load_weight(p);
/*
* We don't need the reset flag anymore after the fork. It has
put_prev_task(rq, p);
p->static_prio = NICE_TO_PRIO(nice);
- set_load_weight(p, true);
+ set_load_weight(p);
old_prio = p->prio;
p->prio = effective_prio(p);
*/
p->rt_priority = attr->sched_priority;
p->normal_prio = normal_prio(p);
- set_load_weight(p, true);
+ set_load_weight(p);
}
/*
#endif
}
- set_load_weight(&init_task, false);
+ set_load_weight(&init_task);
/*
* The boot idle thread does lazy MMU switching as well:
#include <linux/syscalls.h>
#include <linux/sysctl.h>
+/* Not exposed in headers: strictly internal use only. */
+#define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
+
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
#ifdef SECCOMP_DEBUG
dump_stack();
#endif
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
do_exit(SIGKILL);
}
case SECCOMP_RET_KILL_THREAD:
case SECCOMP_RET_KILL_PROCESS:
default:
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGSYS, action, true);
/* Dump core only if this is the last remaining thread. */
if (action != SECCOMP_RET_KILL_THREAD ||
return 0;
case SECCOMP_MODE_FILTER:
return __seccomp_filter(this_syscall, sd, false);
+ /* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
+ case SECCOMP_MODE_DEAD:
+ WARN_ON_ONCE(1);
+ do_exit(SIGKILL);
+ return -1;
default:
BUG();
}
}
/*
* Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
- * debugging to leave init killable.
+ * debugging to leave init killable. But HANDLER_EXIT is always fatal.
*/
- if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
+ if (action->sa.sa_handler == SIG_DFL &&
+ (!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = send_signal(sig, info, t, PIDTYPE_PID);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
static int __init set_tracepoint_printk(char *str)
{
+ /* Ignore the "tp_printk_stop_on_boot" param */
+ if (*str == '_')
+ return 0;
+
if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
tracepoint_printk = 1;
return 1;
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- __blake2s_update(state, in, inlen, blake2s_compress);
+ __blake2s_update(state, in, inlen, false);
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- __blake2s_final(state, out, blake2s_compress);
+ __blake2s_final(state, out, false);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
pte = ptep_get(args->ptep);
WARN_ON(pte_young(pte));
+
+ ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}
static void __init pte_savedwrite_tests(struct pgtable_debug_args *args)
static bool kfence_enabled __read_mostly;
-static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
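A minimal fragment of what a test module can now do with the export, mirroring the kfence_test changes below; that the extern declaration is reachable via <linux/kfence.h> is an assumption here.

#include <linux/jiffies.h>
#include <linux/kfence.h>

/* Size the allocation timeout from the runtime sample interval rather
 * than the compile-time CONFIG_KFENCE_SAMPLE_INTERVAL. */
static unsigned long kfence_alloc_deadline(void)
{
	/* 100x the sample interval, as the test comments suggest */
	return jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
}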
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
/*
* Especially for non-preemption kernels, ensure the allocation-gate
* timer can catch up: after @resched_after, every failed allocation
* attempt yields, to ensure the allocation-gate timer is scheduled.
*/
- resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
+ resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
do {
if (test_cache)
alloc = kmem_cache_alloc(test_cache, gfp);
int i;
/* Skip if we think it'd take too long. */
- KFENCE_TEST_REQUIRES(test, CONFIG_KFENCE_SAMPLE_INTERVAL <= 100);
+ KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100);
setup_test_cache(test, size, 0, NULL);
buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
* 100x the sample interval should be more than enough to ensure we get
* a KFENCE allocation eventually.
*/
- timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
+ timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
do {
void *objects[100];
int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
+#include <linux/page_table_check.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
return 0;
}
+static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp)
+{
+ spinlock_t *ptl;
+ pmd_t pmd;
+
+ mmap_assert_write_locked(mm);
+ ptl = pmd_lock(vma->vm_mm, pmdp);
+ pmd = pmdp_collapse_flush(vma, addr, pmdp);
+ spin_unlock(ptl);
+ mm_dec_nr_ptes(mm);
+ page_table_check_pte_clear_range(mm, addr, pmd);
+ pte_free(mm, pmd_pgtable(pmd));
+}
+
/**
* collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
* address haddr.
struct vm_area_struct *vma = find_vma(mm, haddr);
struct page *hpage;
pte_t *start_pte, *pte;
- pmd_t *pmd, _pmd;
+ pmd_t *pmd;
spinlock_t *ptl;
int count = 0;
int i;
}
/* step 4: collapse pmd */
- ptl = pmd_lock(vma->vm_mm, pmd);
- _pmd = pmdp_collapse_flush(vma, haddr, pmd);
- spin_unlock(ptl);
- mm_dec_nr_ptes(mm);
- pte_free(mm, pmd_pgtable(_pmd));
-
+ collapse_and_free_pmd(mm, vma, haddr, pmd);
drop_hpage:
unlock_page(hpage);
put_page(hpage);
struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long addr;
- pmd_t *pmd, _pmd;
+ pmd_t *pmd;
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
* reverse order. Trylock is a way to avoid deadlock.
*/
if (mmap_write_trylock(mm)) {
- if (!khugepaged_test_exit(mm)) {
- spinlock_t *ptl = pmd_lock(mm, pmd);
- /* assume page table is clear */
- _pmd = pmdp_collapse_flush(vma, addr, pmd);
- spin_unlock(ptl);
- mm_dec_nr_ptes(mm);
- pte_free(mm, pmd_pgtable(_pmd));
- }
+ if (!khugepaged_test_exit(mm))
+ collapse_and_free_pmd(mm, vma, addr, pmd);
mmap_write_unlock(mm);
} else {
/* Try again later */
{
unsigned long flags;
struct kmemleak_object *object;
- int i;
+ struct zone *zone;
+ int __maybe_unused i;
int new_leaks = 0;
jiffies_last_scan = jiffies;
* Struct page scanning for each node.
*/
get_online_mems();
- for_each_online_node(i) {
- unsigned long start_pfn = node_start_pfn(i);
- unsigned long end_pfn = node_end_pfn(i);
+ for_each_populated_zone(zone) {
+ unsigned long start_pfn = zone->zone_start_pfn;
+ unsigned long end_pfn = zone_end_pfn(zone);
unsigned long pfn;
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
if (!page)
continue;
- /* only scan pages belonging to this node */
- if (page_to_nid(page) != i)
+ /* only scan pages belonging to this zone */
+ if (page_zone(page) != zone)
continue;
/* only scan if page is in use */
if (page_count(page) == 0)
}
#ifdef CONFIG_MEMCG_KMEM
-extern spinlock_t css_set_lock;
+static DEFINE_SPINLOCK(objcg_lock);
bool mem_cgroup_kmem_disabled(void)
{
if (nr_pages)
obj_cgroup_uncharge_pages(objcg, nr_pages);
- spin_lock_irqsave(&css_set_lock, flags);
+ spin_lock_irqsave(&objcg_lock, flags);
list_del(&objcg->list);
- spin_unlock_irqrestore(&css_set_lock, flags);
+ spin_unlock_irqrestore(&objcg_lock, flags);
percpu_ref_exit(ref);
kfree_rcu(objcg, rcu);
objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
- spin_lock_irq(&css_set_lock);
+ spin_lock_irq(&objcg_lock);
/* 1) Ready to reparent active objcg. */
list_add(&objcg->list, &memcg->objcg_list);
/* 3) Move already reparented objcgs to the parent's list */
list_splice(&memcg->objcg_list, &parent->objcg_list);
- spin_unlock_irq(&css_set_lock);
+ spin_unlock_irq(&objcg_lock);
percpu_ref_kill(&objcg->refcnt);
}
/* Also skip shared copy-on-write pages */
if (is_cow_mapping(vma->vm_flags) &&
- page_mapcount(page) != 1)
+ page_count(page) != 1)
continue;
/*
* onlining - just onlined memory won't immediately be considered for
* allocation.
*/
- if (!isolated_page && PageBuddy(page)) {
+ if (!isolated_page) {
nr_pages = move_freepages_block(zone, page, migratetype, NULL);
__mod_zone_freepage_state(zone, nr_pages, migratetype);
}
{
struct page_ext *page_ext;
struct page *page;
+ unsigned long i;
bool anon;
- int i;
if (!pfn_valid(pfn))
return;
{
struct page_ext *page_ext;
struct page *page;
+ unsigned long i;
bool anon;
- int i;
if (!pfn_valid(pfn))
return;
void __page_table_check_zero(struct page *page, unsigned int order)
{
struct page_ext *page_ext = lookup_page_ext(page);
- int i;
+ unsigned long i;
BUG_ON(!page_ext);
- for (i = 0; i < (1 << order); i++) {
+ for (i = 0; i < (1ul << order); i++) {
struct page_table_check *ptc = get_page_table_check(page_ext);
BUG_ON(atomic_read(&ptc->anon_map_count));
void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- pte_t old_pte;
-
if (&init_mm == mm)
return;
- old_pte = *ptep;
- if (pte_user_accessible_page(old_pte)) {
- page_table_check_clear(mm, addr, pte_pfn(old_pte),
- PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pte_clear(mm, addr, *ptep);
if (pte_user_accessible_page(pte)) {
page_table_check_set(mm, addr, pte_pfn(pte),
PAGE_SIZE >> PAGE_SHIFT,
void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
- pmd_t old_pmd;
-
if (&init_mm == mm)
return;
- old_pmd = *pmdp;
- if (pmd_user_accessible_page(old_pmd)) {
- page_table_check_clear(mm, addr, pmd_pfn(old_pmd),
- PMD_PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pmd_clear(mm, addr, *pmdp);
if (pmd_user_accessible_page(pmd)) {
page_table_check_set(mm, addr, pmd_pfn(pmd),
PMD_PAGE_SIZE >> PAGE_SHIFT,
void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
- pud_t old_pud;
-
if (&init_mm == mm)
return;
- old_pud = *pudp;
- if (pud_user_accessible_page(old_pud)) {
- page_table_check_clear(mm, addr, pud_pfn(old_pud),
- PUD_PAGE_SIZE >> PAGE_SHIFT);
- }
-
+ __page_table_check_pud_clear(mm, addr, *pudp);
if (pud_user_accessible_page(pud)) {
page_table_check_set(mm, addr, pud_pfn(pud),
PUD_PAGE_SIZE >> PAGE_SHIFT,
}
}
EXPORT_SYMBOL(__page_table_check_pud_set);
+
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (&init_mm == mm)
+ return;
+
+ if (!pmd_bad(pmd) && !pmd_leaf(pmd)) {
+ pte_t *ptep = pte_offset_map(&pmd, addr);
+ unsigned long i;
+
+		for (i = 0; i < PTRS_PER_PTE; i++) {
+			__page_table_check_pte_clear(mm, addr, ptep[i]);
+			addr += PAGE_SIZE;
+		}
+		/* keep the PTE page mapped until the last dereference */
+		pte_unmap(ptep);
+ }
+}
* forward progress (e.g. journalling workqueues or kthreads).
*/
if (!current_is_kswapd() &&
- current->flags & (PF_IO_WORKER|PF_KTHREAD))
+ current->flags & (PF_IO_WORKER|PF_KTHREAD)) {
+ cond_resched();
return;
+ }
/*
* These figures are pulled out of thin air.
u32 skb_prio, u16 vlan_prio);
int vlan_dev_set_egress_priority(const struct net_device *dev,
u32 skb_prio, u16 vlan_prio);
+void vlan_dev_free_egress_priority(const struct net_device *dev);
int vlan_dev_change_flags(const struct net_device *dev, u32 flag, u32 mask);
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result,
size_t size);
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
-void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
struct net_device *real_dev);
}
/* Note: this function might be called multiple times for the same device. */
-void vlan_dev_uninit(struct net_device *dev)
+void vlan_dev_free_egress_priority(const struct net_device *dev)
{
struct vlan_priority_tci_mapping *pm;
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
}
}
+static void vlan_dev_uninit(struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+ vlan_dev_free_egress_priority(dev);
+
+ /* Get rid of the vlan's reference to real_dev */
+ dev_put_track(vlan->real_dev, &vlan->dev_tracker);
+}
+
static netdev_features_t vlan_dev_fix_features(struct net_device *dev,
netdev_features_t features)
{
free_percpu(vlan->vlan_pcpu_stats);
vlan->vlan_pcpu_stats = NULL;
-
- /* Get rid of the vlan's reference to real_dev */
- dev_put_track(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)
return -EINVAL;
err = vlan_changelink(dev, tb, data, extack);
- if (!err)
- err = register_vlan_dev(dev, extack);
if (err)
- vlan_dev_uninit(dev);
+ return err;
+ err = register_vlan_dev(dev, extack);
+ if (err)
+ vlan_dev_free_egress_priority(dev);
return err;
}
spin_unlock_bh(&ax25_list_lock);
lock_sock(sk);
s->ax25_dev = NULL;
+ dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev_put(ax25_dev);
- release_sock(sk);
ax25_disconnect(s, ENETUNREACH);
+ release_sock(sk);
spin_lock_bh(&ax25_list_lock);
sock_put(sk);
/* The entry could have been deleted from the
}
}
- if (ax25_dev != NULL)
+ if (ax25_dev) {
ax25_fillin_cb(ax25, ax25_dev);
+ dev_hold_track(ax25_dev->dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
+ }
done:
ax25_cb_add(ax25);
struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
+static int br_mc_disabled_update(struct net_device *dev, bool value,
+ struct netlink_ext_ack *extack);
+
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
struct net_bridge_port_group_sg_key *sg_p)
return mp;
if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+ br_mc_disabled_update(br->dev, false, NULL);
br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
return ERR_PTR(-E2BIG);
}
struct net_bridge_port_group *pg,
int type);
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
- struct netlink_ext_ack *extack);
+ bool changed, struct netlink_ext_ack *extack);
int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid);
void br_switchdev_init(struct net_bridge *br);
return 0;
}
-static inline int br_switchdev_port_vlan_add(struct net_device *dev,
- u16 vid, u16 flags,
+static inline int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid,
+ u16 flags, bool changed,
struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
- struct netlink_ext_ack *extack)
+ bool changed, struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan v = {
.obj.orig_dev = dev,
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
.flags = flags,
.vid = vid,
+ .changed = changed,
};
return switchdev_port_obj_add(dev, &v.obj, extack);
return notifier_to_errno(err);
}
-static int br_switchdev_vlan_replay(struct net_device *br_dev,
- struct net_device *dev,
- const void *ctx, bool adding,
- struct notifier_block *nb,
- struct netlink_ext_ack *extack)
+static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
+ struct net_device *dev,
+ struct net_bridge_vlan_group *vg,
+ const void *ctx, unsigned long action,
+ struct netlink_ext_ack *extack)
{
- struct net_bridge_vlan_group *vg;
struct net_bridge_vlan *v;
- struct net_bridge_port *p;
- struct net_bridge *br;
- unsigned long action;
int err = 0;
u16 pvid;
- ASSERT_RTNL();
-
- if (!nb)
- return 0;
-
- if (!netif_is_bridge_master(br_dev))
- return -EINVAL;
-
- if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
- return -EINVAL;
-
- if (netif_is_bridge_master(dev)) {
- br = netdev_priv(dev);
- vg = br_vlan_group(br);
- p = NULL;
- } else {
- p = br_port_get_rtnl(dev);
- if (WARN_ON(!p))
- return -EINVAL;
- vg = nbp_vlan_group(p);
- br = p->br;
- }
-
if (!vg)
return 0;
- if (adding)
- action = SWITCHDEV_PORT_OBJ_ADD;
- else
- action = SWITCHDEV_PORT_OBJ_DEL;
-
pvid = br_get_pvid(vg);
list_for_each_entry(v, &vg->vlan_list, vlist) {
return err;
}
- return err;
+ return 0;
+}
+
+static int br_switchdev_vlan_replay(struct net_device *br_dev,
+ const void *ctx, bool adding,
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge *br = netdev_priv(br_dev);
+ struct net_bridge_port *p;
+ unsigned long action;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (!nb)
+ return 0;
+
+ if (!netif_is_bridge_master(br_dev))
+ return -EINVAL;
+
+ if (adding)
+ action = SWITCHDEV_PORT_OBJ_ADD;
+ else
+ action = SWITCHDEV_PORT_OBJ_DEL;
+
+ err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
+ ctx, action, extack);
+ if (err)
+ return err;
+
+ list_for_each_entry(p, &br->port_list, list) {
+ struct net_device *dev = p->dev;
+
+ err = br_switchdev_vlan_replay_group(nb, dev,
+ nbp_vlan_group(p),
+ ctx, action, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct net_device *dev = p->dev;
int err;
- err = br_switchdev_vlan_replay(br_dev, dev, ctx, true, blocking_nb,
- extack);
+ err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
if (err && err != -EOPNOTSUPP)
return err;
struct net_device *br_dev = p->br->dev;
struct net_device *dev = p->dev;
- br_switchdev_vlan_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
+ br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);
- br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);
+ br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}
/* Let the bridge know that this port is offloaded, so that it can assign a
return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
}
-static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
+static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
const struct net_bridge_vlan *v)
{
if (vg->pvid == v->vid)
- return false;
+ return;
smp_wmb();
br_vlan_set_pvid_state(vg, v->state);
vg->pvid = v->vid;
-
- return true;
}
-static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
+static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
if (vg->pvid != vid)
- return false;
+ return;
smp_wmb();
vg->pvid = 0;
-
- return true;
}
-/* return true if anything changed, false otherwise */
-static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
+/* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
+ * If @commit is false, return just whether the BRIDGE_VLAN_INFO_PVID and
+ * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change on @v.
+ */
+static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
+ bool commit)
{
struct net_bridge_vlan_group *vg;
- u16 old_flags = v->flags;
- bool ret;
+ bool change;
if (br_vlan_is_master(v))
vg = br_vlan_group(v->br);
else
vg = nbp_vlan_group(v->port);
+ /* check if anything would be changed on commit */
+ change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
+ ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);
+
+ if (!commit)
+ goto out;
+
if (flags & BRIDGE_VLAN_INFO_PVID)
- ret = __vlan_add_pvid(vg, v);
+ __vlan_add_pvid(vg, v);
else
- ret = __vlan_delete_pvid(vg, v->vid);
+ __vlan_delete_pvid(vg, v->vid);
if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
else
v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
- return ret || !!(old_flags ^ v->flags);
+out:
+ return change;
+}
+
+static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
+{
+ return __vlan_flags_update(v, flags, false);
+}
+
+static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
+{
+ __vlan_flags_update(v, flags, true);
}
static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
/* Try switchdev op first. In case it is not supported, fallback to
* 8021q add.
*/
- err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
if (err == -EOPNOTSUPP)
return vlan_vid_add(dev, br->vlan_proto, v->vid);
v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
}
br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
} else {
- err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
- if (err && err != -EOPNOTSUPP)
- goto out;
+ if (br_vlan_should_use(v)) {
+ err = br_switchdev_port_vlan_add(dev, v->vid, flags,
+ false, extack);
+ if (err && err != -EOPNOTSUPP)
+ goto out;
+ }
br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
}
goto out_fdb_insert;
__vlan_add_list(v);
- __vlan_add_flags(v, flags);
+ __vlan_flags_commit(v, flags);
br_multicast_toggle_one_vlan(v, true);
if (p)
{
struct net_bridge_vlan *vlan, *tmp;
u16 v_start = 0, v_end = 0;
+ int err;
__vlan_delete_pvid(vg, vg->pvid);
list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
}
v_end = vlan->vid;
- __vlan_del(vlan);
+ err = __vlan_del(vlan);
+ if (err) {
+ br_err(br,
+ "port %u(%s) failed to delete vlan %d: %pe\n",
+ (unsigned int) p->port_no, p->dev->name,
+ vlan->vid, ERR_PTR(err));
+ }
}
/* notify about the last/whole vlan range */
u16 flags, bool *changed,
struct netlink_ext_ack *extack)
{
+ bool would_change = __vlan_flags_would_change(vlan, flags);
+ bool becomes_brentry = false;
int err;
- err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
- if (err && err != -EOPNOTSUPP)
- return err;
-
if (!br_vlan_is_brentry(vlan)) {
/* Trying to change flags of non-existent bridge vlan */
- if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
- err = -EINVAL;
- goto err_flags;
- }
+ if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return -EINVAL;
+
+ becomes_brentry = true;
+ }
+
+	/* Master VLANs that aren't brentries weren't notified before;
+	 * notify them now.
+ */
+ if (becomes_brentry || would_change) {
+ err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
+ would_change, extack);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+ }
+
+ if (becomes_brentry) {
/* It was only kept for port vlans, now make it real */
err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
if (err) {
br_multicast_toggle_one_vlan(vlan, true);
}
- if (__vlan_add_flags(vlan, flags))
+ __vlan_flags_commit(vlan, flags);
+ if (would_change)
*changed = true;
return 0;
err_fdb_insert:
-err_flags:
br_switchdev_port_vlan_del(br->dev, vlan->vid);
return err;
}
*changed = false;
vlan = br_vlan_find(nbp_vlan_group(port), vid);
if (vlan) {
- /* Pass the flags to the hardware bridge */
- ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
- if (ret && ret != -EOPNOTSUPP)
- return ret;
- *changed = __vlan_add_flags(vlan, flags);
+ bool would_change = __vlan_flags_would_change(vlan, flags);
+
+ if (would_change) {
+ /* Pass the flags to the hardware bridge */
+ ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
+ true, extack);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ __vlan_flags_commit(vlan, flags);
+ *changed = would_change;
return 0;
}
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/wait.h>
#include <linux/uio.h>
struct tpcon rx, tx;
struct list_head notifier;
wait_queue_head_t wait;
+ spinlock_t rx_lock; /* protect single thread state machine */
};
static LIST_HEAD(isotp_notifier_list);
n_pci_type = cf->data[ae] & 0xF0;
+ /* Make sure the state changes and data structures stay consistent at
+	 * CAN frame reception time. This locking is not needed in real-world
+	 * use cases, but the inconsistency can be triggered with syzkaller.
+ */
+ spin_lock(&so->rx_lock);
+
if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
/* check rx/tx path half duplex expectations */
if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
(so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
- return;
+ goto out_unlock;
}
switch (n_pci_type) {
isotp_rcv_cf(sk, cf, ae, skb);
break;
}
+
+out_unlock:
+ spin_unlock(&so->rx_lock);
}
static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
if (!size || size > MAX_MSG_LENGTH) {
err = -EINVAL;
- goto err_out;
+ goto err_out_drop;
}
/* take care of a potential SF_DL ESC offset for TX_DL > 8 */
if ((so->opt.flags & CAN_ISOTP_SF_BROADCAST) &&
(size > so->tx.ll_dl - SF_PCI_SZ4 - ae - off)) {
err = -EINVAL;
- goto err_out;
+ goto err_out_drop;
}
err = memcpy_from_msg(so->tx.buf, msg, size);
if (err < 0)
- goto err_out;
+ goto err_out_drop;
dev = dev_get_by_index(sock_net(sk), so->ifindex);
if (!dev) {
err = -ENXIO;
- goto err_out;
+ goto err_out_drop;
}
skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
msg->msg_flags & MSG_DONTWAIT, &err);
if (!skb) {
dev_put(dev);
- goto err_out;
+ goto err_out_drop;
}
can_skb_reserve(skb);
if (err) {
pr_notice_once("can-isotp: %s: can_send_ret %pe\n",
__func__, ERR_PTR(err));
- goto err_out;
+ goto err_out_drop;
}
if (wait_tx_done) {
return size;
+err_out_drop:
+ /* drop this PDU and unlock a potential wait queue */
+ old_state = ISOTP_IDLE;
err_out:
so->tx.state = old_state;
if (so->tx.state == ISOTP_IDLE)
so->txtimer.function = isotp_tx_timer_handler;
init_waitqueue_head(&so->wait);
+ spin_lock_init(&so->rx_lock);
spin_lock(&isotp_notifier_lock);
list_add_tail(&so->notifier, &isotp_notifier_list);
Opt_cephx_sign_messages,
Opt_tcp_nodelay,
Opt_abort_on_full,
+ Opt_rxbounce,
};
enum {
fsparam_u32 ("osdkeepalive", Opt_osdkeepalivetimeout),
fsparam_enum ("read_from_replica", Opt_read_from_replica,
ceph_param_read_from_replica),
+ fsparam_flag ("rxbounce", Opt_rxbounce),
fsparam_enum ("ms_mode", Opt_ms_mode,
ceph_param_ms_mode),
fsparam_string ("secret", Opt_secret),
case Opt_abort_on_full:
opt->flags |= CEPH_OPT_ABORT_ON_FULL;
break;
+ case Opt_rxbounce:
+ opt->flags |= CEPH_OPT_RXBOUNCE;
+ break;
default:
BUG();
seq_puts(m, "notcp_nodelay,");
if (show_all && (opt->flags & CEPH_OPT_ABORT_ON_FULL))
seq_puts(m, "abort_on_full,");
+ if (opt->flags & CEPH_OPT_RXBOUNCE)
+ seq_puts(m, "rxbounce,");
if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
seq_printf(m, "mount_timeout=%d,",
ceph_msg_put(con->out_msg);
con->out_msg = NULL;
}
+ if (con->bounce_page) {
+ __free_page(con->bounce_page);
+ con->bounce_page = NULL;
+ }
if (ceph_msgr2(from_msgr(con->msgr)))
ceph_con_v2_reset_protocol(con);
static int read_partial_msg_data(struct ceph_connection *con)
{
- struct ceph_msg *msg = con->in_msg;
- struct ceph_msg_data_cursor *cursor = &msg->cursor;
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
struct page *page;
size_t page_offset;
u32 crc = 0;
int ret;
- if (!msg->num_data_items)
- return -EIO;
-
if (do_datacrc)
crc = con->in_data_crc;
while (cursor->total_resid) {
return 1; /* must return > 0 to indicate success */
}
+static int read_partial_msg_data_bounce(struct ceph_connection *con)
+{
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ struct page *page;
+ size_t off, len;
+ u32 crc;
+ int ret;
+
+ if (unlikely(!con->bounce_page)) {
+ con->bounce_page = alloc_page(GFP_NOIO);
+ if (!con->bounce_page) {
+ pr_err("failed to allocate bounce page\n");
+ return -ENOMEM;
+ }
+ }
+
+ crc = con->in_data_crc;
+ while (cursor->total_resid) {
+ if (!cursor->resid) {
+ ceph_msg_data_advance(cursor, 0);
+ continue;
+ }
+
+ page = ceph_msg_data_next(cursor, &off, &len, NULL);
+ ret = ceph_tcp_recvpage(con->sock, con->bounce_page, 0, len);
+ if (ret <= 0) {
+ con->in_data_crc = crc;
+ return ret;
+ }
+
+ crc = crc32c(crc, page_address(con->bounce_page), ret);
+ memcpy_to_page(page, off, page_address(con->bounce_page), ret);
+
+ ceph_msg_data_advance(cursor, ret);
+ }
+ con->in_data_crc = crc;
+
+ return 1; /* must return > 0 to indicate success */
+}
+
/*
* read (part of) a message.
*/
/* (page) data */
if (data_len) {
- ret = read_partial_msg_data(con);
+ if (!m->num_data_items)
+ return -EIO;
+
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
+ ret = read_partial_msg_data_bounce(con);
+ else
+ ret = read_partial_msg_data(con);
if (ret <= 0)
return ret;
}
#define IN_S_HANDLE_CONTROL_REMAINDER 3
#define IN_S_PREPARE_READ_DATA 4
#define IN_S_PREPARE_READ_DATA_CONT 5
-#define IN_S_HANDLE_EPILOGUE 6
-#define IN_S_FINISH_SKIP 7
+#define IN_S_PREPARE_READ_ENC_PAGE 6
+#define IN_S_HANDLE_EPILOGUE 7
+#define IN_S_FINISH_SKIP 8
#define OUT_S_QUEUE_DATA 1
#define OUT_S_QUEUE_DATA_CONT 2
padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}
-static int decrypt_message(struct ceph_connection *con)
+static int decrypt_tail(struct ceph_connection *con)
{
+ struct sg_table enc_sgt = {};
struct sg_table sgt = {};
+ int tail_len;
int ret;
+ tail_len = tail_onwire_len(con->in_msg, true);
+ ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt, 0, tail_len,
+ GFP_NOIO);
+ if (ret)
+ goto out;
+
ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
con->v2.in_buf, true);
if (ret)
goto out;
- ret = gcm_crypt(con, false, sgt.sgl, sgt.sgl,
- tail_onwire_len(con->in_msg, true));
+ dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
+ con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
+ ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);
+ if (ret)
+ goto out;
+
+ WARN_ON(!con->v2.in_enc_page_cnt);
+ ceph_release_page_vector(con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = NULL;
+ con->v2.in_enc_page_cnt = 0;
out:
sg_free_table(&sgt);
+ sg_free_table(&enc_sgt);
return ret;
}
return 0;
}
-static void prepare_read_data(struct ceph_connection *con)
+static int prepare_read_data(struct ceph_connection *con)
{
struct bio_vec bv;
- if (!con_secure(con))
- con->in_data_crc = -1;
+ con->in_data_crc = -1;
ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
data_len(con->in_msg));
get_bvec_at(&con->v2.in_cursor, &bv);
- set_in_bvec(con, &bv);
+	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+		if (unlikely(!con->bounce_page)) {
+			con->bounce_page = alloc_page(GFP_NOIO);
+			if (!con->bounce_page) {
+				pr_err("failed to allocate bounce page\n");
+				return -ENOMEM;
+			}
+		}
+
+		bv.bv_page = con->bounce_page;
+		bv.bv_offset = 0;
+	}
+	set_in_bvec(con, &bv);
con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;
+ return 0;
}
static void prepare_read_data_cont(struct ceph_connection *con)
{
struct bio_vec bv;
- if (!con_secure(con))
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ con->in_data_crc = crc32c(con->in_data_crc,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+
+ get_bvec_at(&con->v2.in_cursor, &bv);
+ memcpy_to_page(bv.bv_page, bv.bv_offset,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+ } else {
con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
con->v2.in_bvec.bv_page,
con->v2.in_bvec.bv_offset,
con->v2.in_bvec.bv_len);
+ }
ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
if (con->v2.in_cursor.total_resid) {
get_bvec_at(&con->v2.in_cursor, &bv);
- set_in_bvec(con, &bv);
+		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+			bv.bv_page = con->bounce_page;
+			bv.bv_offset = 0;
+		}
+		set_in_bvec(con, &bv);
WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);
return;
}
/*
- * We've read all data. Prepare to read data padding (if any)
- * and epilogue.
+ * We've read all data. Prepare to read epilogue.
*/
reset_in_kvecs(con);
- if (con_secure(con)) {
- if (need_padding(data_len(con->in_msg)))
- add_in_kvec(con, DATA_PAD(con->v2.in_buf),
- padding_len(data_len(con->in_msg)));
- add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_SECURE_LEN);
+ add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
+ con->v2.in_state = IN_S_HANDLE_EPILOGUE;
+}
+
+static int prepare_read_tail_plain(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->in_msg;
+
+ if (!front_len(msg) && !middle_len(msg)) {
+ WARN_ON(!data_len(msg));
+ return prepare_read_data(con);
+ }
+
+ reset_in_kvecs(con);
+ if (front_len(msg)) {
+ add_in_kvec(con, msg->front.iov_base, front_len(msg));
+ WARN_ON(msg->front.iov_len != front_len(msg));
+ }
+ if (middle_len(msg)) {
+ add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
+ WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
+ }
+
+ if (data_len(msg)) {
+ con->v2.in_state = IN_S_PREPARE_READ_DATA;
} else {
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
+ con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
+ return 0;
+}
+
+static void prepare_read_enc_page(struct ceph_connection *con)
+{
+ struct bio_vec bv;
+
+ dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
+ con->v2.in_enc_resid);
+ WARN_ON(!con->v2.in_enc_resid);
+
+ bv.bv_page = con->v2.in_enc_pages[con->v2.in_enc_i];
+ bv.bv_offset = 0;
+ bv.bv_len = min(con->v2.in_enc_resid, (int)PAGE_SIZE);
+
+ set_in_bvec(con, &bv);
+ con->v2.in_enc_i++;
+ con->v2.in_enc_resid -= bv.bv_len;
+
+ if (con->v2.in_enc_resid) {
+ con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;
+ return;
+ }
+
+ /*
+ * We are set to read the last piece of ciphertext (ending
+ * with epilogue) + auth tag.
+ */
+ WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
+static int prepare_read_tail_secure(struct ceph_connection *con)
+{
+ struct page **enc_pages;
+ int enc_page_cnt;
+ int tail_len;
+
+ tail_len = tail_onwire_len(con->in_msg, true);
+ WARN_ON(!tail_len);
+
+ enc_page_cnt = calc_pages_for(0, tail_len);
+ enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
+ if (IS_ERR(enc_pages))
+ return PTR_ERR(enc_pages);
+
+ WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = enc_pages;
+ con->v2.in_enc_page_cnt = enc_page_cnt;
+ con->v2.in_enc_resid = tail_len;
+ con->v2.in_enc_i = 0;
+
+ prepare_read_enc_page(con);
+ return 0;
+}
+
static void __finish_skip(struct ceph_connection *con)
{
con->in_seq++;
}
msg = con->in_msg; /* set in process_message_header() */
- if (!front_len(msg) && !middle_len(msg)) {
- if (!data_len(msg))
- return process_message(con);
-
- prepare_read_data(con);
- return 0;
- }
-
- reset_in_kvecs(con);
if (front_len(msg)) {
WARN_ON(front_len(msg) > msg->front_alloc_len);
- add_in_kvec(con, msg->front.iov_base, front_len(msg));
msg->front.iov_len = front_len(msg);
-
- if (con_secure(con) && need_padding(front_len(msg)))
- add_in_kvec(con, FRONT_PAD(con->v2.in_buf),
- padding_len(front_len(msg)));
} else {
msg->front.iov_len = 0;
}
if (middle_len(msg)) {
WARN_ON(middle_len(msg) > msg->middle->alloc_len);
- add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
msg->middle->vec.iov_len = middle_len(msg);
-
- if (con_secure(con) && need_padding(middle_len(msg)))
- add_in_kvec(con, MIDDLE_PAD(con->v2.in_buf),
- padding_len(middle_len(msg)));
} else if (msg->middle) {
msg->middle->vec.iov_len = 0;
}
- if (data_len(msg)) {
- con->v2.in_state = IN_S_PREPARE_READ_DATA;
- } else {
- add_in_kvec(con, con->v2.in_buf,
- con_secure(con) ? CEPH_EPILOGUE_SECURE_LEN :
- CEPH_EPILOGUE_PLAIN_LEN);
- con->v2.in_state = IN_S_HANDLE_EPILOGUE;
- }
- return 0;
+ if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
+ return process_message(con);
+
+ if (con_secure(con))
+ return prepare_read_tail_secure(con);
+
+ return prepare_read_tail_plain(con);
}
static int handle_preamble(struct ceph_connection *con)
int ret;
if (con_secure(con)) {
- ret = decrypt_message(con);
+ ret = decrypt_tail(con);
if (ret) {
if (ret == -EBADMSG)
con->error_msg = "integrity error, bad epilogue auth tag";
ret = handle_control_remainder(con);
break;
case IN_S_PREPARE_READ_DATA:
- prepare_read_data(con);
- ret = 0;
+ ret = prepare_read_data(con);
break;
case IN_S_PREPARE_READ_DATA_CONT:
prepare_read_data_cont(con);
ret = 0;
break;
+ case IN_S_PREPARE_READ_ENC_PAGE:
+ prepare_read_enc_page(con);
+ ret = 0;
+ break;
case IN_S_HANDLE_EPILOGUE:
ret = handle_epilogue(con);
break;
static void revoke_at_prepare_read_data(struct ceph_connection *con)
{
- int remaining; /* data + [data padding] + epilogue */
+ int remaining;
int resid;
+ WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
- if (con_secure(con))
- remaining = padded_len(data_len(con->in_msg)) +
- CEPH_EPILOGUE_SECURE_LEN;
- else
- remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
-
+ remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
remaining);
con->v2.in_iter.count -= resid;
static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
{
int recved, resid; /* current piece of data */
- int remaining; /* [data padding] + epilogue */
+ int remaining;
+ WARN_ON(con_secure(con));
WARN_ON(!data_len(con->in_msg));
WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
ceph_msg_data_advance(&con->v2.in_cursor, recved);
WARN_ON(resid > con->v2.in_cursor.total_resid);
- if (con_secure(con))
- remaining = padding_len(data_len(con->in_msg)) +
- CEPH_EPILOGUE_SECURE_LEN;
- else
- remaining = CEPH_EPILOGUE_PLAIN_LEN;
-
+ remaining = CEPH_EPILOGUE_PLAIN_LEN;
dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
con->v2.in_cursor.total_resid, remaining);
con->v2.in_iter.count -= resid;
con->v2.in_state = IN_S_FINISH_SKIP;
}
+static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
+{
+ int resid; /* current enc page (not necessarily data) */
+
+ WARN_ON(!con_secure(con));
+ WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
+ resid = iov_iter_count(&con->v2.in_iter);
+ WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
+
+ dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
+ con->v2.in_enc_resid);
+ con->v2.in_iter.count -= resid;
+ set_in_skip(con, resid + con->v2.in_enc_resid);
+ con->v2.in_state = IN_S_FINISH_SKIP;
+}
+
static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
int resid;
- WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
resid = iov_iter_count(&con->v2.in_iter);
WARN_ON(!resid);
case IN_S_PREPARE_READ_DATA_CONT:
revoke_at_prepare_read_data_cont(con);
break;
+ case IN_S_PREPARE_READ_ENC_PAGE:
+ revoke_at_prepare_read_enc_page(con);
+ break;
case IN_S_HANDLE_EPILOGUE:
revoke_at_handle_epilogue(con);
break;
clear_out_sign_kvecs(con);
free_conn_bufs(con);
+ if (con->v2.in_enc_pages) {
+ WARN_ON(!con->v2.in_enc_page_cnt);
+ ceph_release_page_vector(con->v2.in_enc_pages,
+ con->v2.in_enc_page_cnt);
+ con->v2.in_enc_pages = NULL;
+ con->v2.in_enc_page_cnt = 0;
+ }
if (con->v2.out_enc_pages) {
WARN_ON(!con->v2.out_enc_page_cnt);
ceph_release_page_vector(con->v2.out_enc_pages,
return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}
-static inline void rps_lock(struct softnet_data *sd)
+static inline void rps_lock_irqsave(struct softnet_data *sd,
+ unsigned long *flags)
{
-#ifdef CONFIG_RPS
- spin_lock(&sd->input_pkt_queue.lock);
-#endif
+ if (IS_ENABLED(CONFIG_RPS))
+ spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
+ else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_save(*flags);
}
-static inline void rps_unlock(struct softnet_data *sd)
+static inline void rps_lock_irq_disable(struct softnet_data *sd)
{
-#ifdef CONFIG_RPS
- spin_unlock(&sd->input_pkt_queue.lock);
-#endif
+ if (IS_ENABLED(CONFIG_RPS))
+ spin_lock_irq(&sd->input_pkt_queue.lock);
+ else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_disable();
+}
+
+static inline void rps_unlock_irq_restore(struct softnet_data *sd,
+ unsigned long *flags)
+{
+ if (IS_ENABLED(CONFIG_RPS))
+ spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
+ else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_restore(*flags);
+}
+
+static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+{
+ if (IS_ENABLED(CONFIG_RPS))
+ spin_unlock_irq(&sd->input_pkt_queue.lock);
+ else if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ local_irq_enable();
}
static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev,
* If yes, queue it to our IPI list and return 1
* If no, return 0
*/
-static int rps_ipi_queued(struct softnet_data *sd)
+static int napi_schedule_rps(struct softnet_data *sd)
{
-#ifdef CONFIG_RPS
struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
+#ifdef CONFIG_RPS
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
return 1;
}
#endif /* CONFIG_RPS */
+ __napi_schedule_irqoff(&mysd->backlog);
return 0;
}
sd = &per_cpu(softnet_data, cpu);
- local_irq_save(flags);
-
- rps_lock(sd);
+ rps_lock_irqsave(sd, &flags);
if (!netif_running(skb->dev))
goto drop;
qlen = skb_queue_len(&sd->input_pkt_queue);
enqueue:
__skb_queue_tail(&sd->input_pkt_queue, skb);
input_queue_tail_incr_save(sd, qtail);
- rps_unlock(sd);
- local_irq_restore(flags);
+ rps_unlock_irq_restore(sd, &flags);
return NET_RX_SUCCESS;
}
/* Schedule NAPI for backlog device
* We can use non atomic operation since we own the queue lock
*/
- if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
- if (!rps_ipi_queued(sd))
- ____napi_schedule(sd, &sd->backlog);
- }
+ if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
+ napi_schedule_rps(sd);
goto enqueue;
}
drop:
sd->dropped++;
- rps_unlock(sd);
-
- local_irq_restore(flags);
+ rps_unlock_irq_restore(sd, &flags);
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
- preempt_disable();
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
- preempt_enable();
} else
#endif
{
unsigned int qtail;
- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
- put_cpu();
+ ret = enqueue_to_backlog(skb, smp_processor_id(), &qtail);
}
return ret;
}
+/**
+ * __netif_rx - Slightly optimized version of netif_rx
+ * @skb: buffer to post
+ *
+ * This behaves as netif_rx except that it does not disable bottom halves.
+ * As a result, this function may only be invoked from interrupt context
+ * (either hard or soft interrupt).
+ */
+int __netif_rx(struct sk_buff *skb)
+{
+ int ret;
+
+ lockdep_assert_once(hardirq_count() | softirq_count());
+
+ trace_netif_rx_entry(skb);
+ ret = netif_rx_internal(skb);
+ trace_netif_rx_exit(ret);
+ return ret;
+}
+EXPORT_SYMBOL(__netif_rx);
+
/**
* netif_rx - post buffer to the network code
* @skb: buffer to post
*
* This function receives a packet from a device driver and queues it for
- * the upper (protocol) levels to process. It always succeeds. The buffer
- * may be dropped during processing for congestion control or by the
- * protocol layers.
+ * the upper (protocol) levels to process via the backlog NAPI device. It
+ * always succeeds. The buffer may be dropped during processing for
+ * congestion control or by the protocol layers.
+ * The network buffer is passed via the backlog NAPI device. Modern NIC
+ * drivers should use NAPI and GRO.
+ * This function can be used from any context.
*
* return values:
* NET_RX_SUCCESS (no congestion)
* NET_RX_DROP (packet was dropped)
*
*/
-
int netif_rx(struct sk_buff *skb)
{
int ret;
+ local_bh_disable();
trace_netif_rx_entry(skb);
-
ret = netif_rx_internal(skb);
trace_netif_rx_exit(ret);
-
+ local_bh_enable();
return ret;
}
EXPORT_SYMBOL(netif_rx);
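With netif_rx() now usable from any context, the former netif_rx_ni()/netif_rx_any_context() call sites collapse onto it, and hard/soft IRQ paths can opt into __netif_rx(). A hedged driver-style sketch contrasting the two entry points; demo_build_rx_skb() is a hypothetical helper assumed to return a fully built skb or NULL.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_build_rx_skb(void *dev_id);	/* assumed */

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct sk_buff *skb = demo_build_rx_skb(dev_id);

	if (skb)
		__netif_rx(skb);	/* hard IRQ context: BHs already off */
	return IRQ_HANDLED;
}

/* From process context the plain variant is correct: it brackets the
 * enqueue with local_bh_disable()/local_bh_enable() itself. */
static void demo_inject(struct sk_buff *skb)
{
	netif_rx(skb);
}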
-int netif_rx_ni(struct sk_buff *skb)
-{
- int err;
-
- trace_netif_rx_ni_entry(skb);
-
- preempt_disable();
- err = netif_rx_internal(skb);
- if (local_softirq_pending())
- do_softirq();
- preempt_enable();
- trace_netif_rx_ni_exit(err);
-
- return err;
-}
-EXPORT_SYMBOL(netif_rx_ni);
-
-int netif_rx_any_context(struct sk_buff *skb)
-{
- /*
- * If invoked from contexts which do not invoke bottom half
- * processing either at return from interrupt or when softrqs are
- * reenabled, use netif_rx_ni() which invokes bottomhalf processing
- * directly.
- */
- if (in_interrupt())
- return netif_rx(skb);
- else
- return netif_rx_ni(skb);
-}
-EXPORT_SYMBOL(netif_rx_any_context);
-
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
local_bh_disable();
sd = this_cpu_ptr(&softnet_data);
- local_irq_disable();
- rps_lock(sd);
+ rps_lock_irq_disable(sd);
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
input_queue_head_incr(sd);
}
}
- rps_unlock(sd);
- local_irq_enable();
+ rps_unlock_irq_enable(sd);
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
struct softnet_data *sd = &per_cpu(softnet_data, cpu);
bool do_flush;
- local_irq_disable();
- rps_lock(sd);
+ rps_lock_irq_disable(sd);
/* as insertion into process_queue happens with the rps lock held,
* process_queue access may race only with dequeue
*/
do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
!skb_queue_empty_lockless(&sd->process_queue);
- rps_unlock(sd);
- local_irq_enable();
+ rps_unlock_irq_enable(sd);
return do_flush;
#endif
}
- local_irq_disable();
- rps_lock(sd);
+ rps_lock_irq_disable(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
skb_queue_splice_tail_init(&sd->input_pkt_queue,
&sd->process_queue);
}
- rps_unlock(sd);
- local_irq_enable();
+ rps_unlock_irq_enable(sd);
}
return work;
#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250
/**
- * netdev_wait_allrefs - wait until all references are gone.
- * @dev: target net_device
+ * netdev_wait_allrefs_any - wait until all references are gone.
+ * @list: list of net_devices to wait on
*
* This is called when unregistering network devices.
*
* We can get stuck here if buggy protocols don't correctly
* call dev_put.
*/
-static void netdev_wait_allrefs(struct net_device *dev)
+static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
{
unsigned long rebroadcast_time, warning_time;
- int wait = 0, refcnt;
+ struct net_device *dev;
+ int wait = 0;
- linkwatch_forget_dev(dev);
+ list_for_each_entry(dev, list, todo_list)
+ linkwatch_forget_dev(dev);
rebroadcast_time = warning_time = jiffies;
- refcnt = netdev_refcnt_read(dev);
- while (refcnt != 1) {
+ list_for_each_entry(dev, list, todo_list)
+ if (netdev_refcnt_read(dev) == 1)
+ return dev;
+
+ while (true) {
if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
rtnl_lock();
/* Rebroadcast unregister notification */
- call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+ list_for_each_entry(dev, list, todo_list)
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
__rtnl_unlock();
rcu_barrier();
rtnl_lock();
- if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
- &dev->state)) {
- /* We must not have linkwatch events
- * pending on unregister. If this
- * happens, we simply run the queue
- * unscheduled, resulting in a noop
- * for this device.
- */
- linkwatch_run_queue();
- }
+ list_for_each_entry(dev, list, todo_list)
+ if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &dev->state)) {
+ /* We must not have linkwatch events
+ * pending on unregister. If this
+ * happens, we simply run the queue
+ * unscheduled, resulting in a noop
+ * for this device.
+ */
+ linkwatch_run_queue();
+ break;
+ }
__rtnl_unlock();
wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
}
- refcnt = netdev_refcnt_read(dev);
+ list_for_each_entry(dev, list, todo_list)
+ if (netdev_refcnt_read(dev) == 1)
+ return dev;
- if (refcnt != 1 &&
- time_after(jiffies, warning_time +
+ if (time_after(jiffies, warning_time +
netdev_unregister_timeout_secs * HZ)) {
- pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
- dev->name, refcnt);
- ref_tracker_dir_print(&dev->refcnt_tracker, 10);
+ list_for_each_entry(dev, list, todo_list) {
+ pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+ dev->name, netdev_refcnt_read(dev));
+ ref_tracker_dir_print(&dev->refcnt_tracker, 10);
+ }
+
warning_time = jiffies;
}
}
*/
void netdev_run_todo(void)
{
+ struct net_device *dev, *tmp;
struct list_head list;
#ifdef CONFIG_LOCKDEP
struct list_head unlink_list;
__rtnl_unlock();
-
/* Wait for rcu callbacks to finish before next phase */
if (!list_empty(&list))
rcu_barrier();
- while (!list_empty(&list)) {
- struct net_device *dev
- = list_first_entry(&list, struct net_device, todo_list);
- list_del(&dev->todo_list);
-
+ list_for_each_entry_safe(dev, tmp, &list, todo_list) {
if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
- pr_err("network todo '%s' but state %d\n",
- dev->name, dev->reg_state);
- dump_stack();
+ netdev_WARN(dev, "run_todo but not unregistering\n");
+ list_del(&dev->todo_list);
continue;
}
dev->reg_state = NETREG_UNREGISTERED;
+ }
- netdev_wait_allrefs(dev);
+ while (!list_empty(&list)) {
+ dev = netdev_wait_allrefs_any(&list);
+ list_del(&dev->todo_list);
/* paranoia */
BUG_ON(netdev_refcnt_read(dev) != 1);
rcu_read_lock();
list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+ struct net_device *dev;
+
/*
* only add a note to our monitor buffer if:
* 1) this is the dev we received on
* 2) its after the last_rx delta
* 3) our rx_dropped count has gone up
*/
- if ((new_stat->dev == napi->dev) &&
+ /* Paired with WRITE_ONCE() in dropmon_net_event() */
+ dev = READ_ONCE(new_stat->dev);
+ if ((dev == napi->dev) &&
(time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
(napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
trace_drop_common(NULL, NULL);
mutex_lock(&net_dm_mutex);
list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) {
if (new_stat->dev == dev) {
- new_stat->dev = NULL;
+
+ /* Paired with READ_ONCE() in trace_napi_poll_hit() */
+ WRITE_ONCE(new_stat->dev, NULL);
+
if (trace_state == TRACE_OFF) {
list_del_rcu(&new_stat->list);
kfree_rcu(new_stat, rcu);
if (unlikely(flags))
return -EINVAL;
+ if (unlikely(len == 0))
+ return 0;
+
/* First find the starting scatterlist element */
i = msg->sg.start;
do {
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
+ struct Qdisc *qdisc;
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (tgt_netnsid >= 0 && nla_put_s32(skb, IFLA_TARGET_NETNSID, tgt_netnsid))
goto nla_put_failure;
+ qdisc = rtnl_dereference(dev->qdisc);
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
#endif
put_master_ifindex(skb, dev) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
- (dev->qdisc &&
- nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
+ (qdisc &&
+ nla_put_string(skb, IFLA_QDISC, qdisc->ops->id)) ||
nla_put_ifalias(skb, dev) ||
nla_put_u32(skb, IFLA_CARRIER_CHANGES,
atomic_read(&dev->carrier_up_count) +
return -ENODEV;
filter_mask = ifsm->filter_mask;
- if (!filter_mask)
+ if (!filter_mask) {
+ NL_SET_ERR_MSG(extack, "Filter mask must be set for stats get");
return -EINVAL;
+ }
nskb = nlmsg_new(if_nlmsg_stats_size(dev, filter_mask), GFP_KERNEL);
if (!nskb)
* while trying to recycle fragments on __skb_frag_unref() we need
* to make one SKB responsible for triggering the recycle path.
* So disable the recycling bit if an SKB is cloned and we have
- * additional references to to the fragmented part of the SKB.
+ * additional references to the fragmented part of the SKB.
* Eventually the last SKB will have the recycling bit set and it's
* dataref set to 0, which will trigger the recycling
*/
{
flush_workqueue(dsa_owq);
}
+EXPORT_SYMBOL_GPL(dsa_flush_workqueue);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
return 0;
mutex_init(&dp->addr_lists_lock);
+ mutex_init(&dp->vlans_lock);
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
+ INIT_LIST_HEAD(&dp->vlans);
if (ds->ops->port_setup) {
err = ds->ops->port_setup(ds, dp->index);
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
struct net_device *slave;
+ struct dsa_vlan *v, *n;
if (!dp->setup)
return;
kfree(a);
}
+ list_for_each_entry_safe(v, n, &dp->vlans, list) {
+ list_del(&v->list);
+ kfree(v);
+ }
+
dp->setup = false;
}
void dsa_switch_shutdown(struct dsa_switch *ds)
{
struct net_device *master, *slave_dev;
- LIST_HEAD(unregister_list);
struct dsa_port *dp;
mutex_lock(&dsa2_mutex);
slave_dev = dp->slave;
netdev_upper_dev_unlink(master, slave_dev);
- /* Just unlinking ourselves as uppers of the master is not
- * sufficient. When the master net device unregisters, that will
- * also call dev_close, which we will catch as NETDEV_GOING_DOWN
- * and trigger a dev_close on our own devices (dsa_slave_close).
- * In turn, that will call dev_mc_unsync on the master's net
- * device. If the master is also a DSA switch port, this will
- * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
- * its own master. Lockdep will complain about the fact that
- * all cascaded masters have the same dsa_master_addr_list_lock_key,
- * which it normally would not do if the cascaded masters would
- * be in a proper upper/lower relationship, which we've just
- * destroyed.
- * To suppress the lockdep warnings, let's actually unregister
- * the DSA slave interfaces too, to avoid the nonsensical
- * multicast address list synchronization on shutdown.
- */
- unregister_netdevice_queue(slave_dev, &unregister_list);
}
- unregister_netdevice_many(&unregister_list);
+
+ /* Disconnect from further netdevice notifiers on the master,
+ * since netdev_uses_dsa() will now return false.
+ */
+ dsa_switch_for_each_cpu_port(dp, ds)
+ dp->master->dsa_ptr = NULL;
rtnl_unlock();
mutex_unlock(&dsa2_mutex);
DSA_NOTIFIER_HOST_MDB_DEL,
DSA_NOTIFIER_VLAN_ADD,
DSA_NOTIFIER_VLAN_DEL,
+ DSA_NOTIFIER_HOST_VLAN_ADD,
+ DSA_NOTIFIER_HOST_VLAN_DEL,
DSA_NOTIFIER_MTU,
DSA_NOTIFIER_TAG_PROTO,
DSA_NOTIFIER_TAG_PROTO_CONNECT,
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
bool dsa_schedule_work(struct work_struct *work);
-void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
+int dsa_port_host_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack);
+int dsa_port_host_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
.attrs = dsa_slave_attrs,
};
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
struct dsa_switch *ds = cpu_dp->ds;
wmb();
dev->dsa_ptr = cpu_dp;
- lockdep_set_class(&dev->addr_list_lock,
- &dsa_master_addr_list_lock_key);
dsa_master_set_promiscuity(dev, 1);
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}
+int dsa_port_host_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .vlan = vlan,
+ .extack = extack,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_ADD, &info);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ vlan_vid_add(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
+
+ return err;
+}
+
+int dsa_port_host_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_notifier_vlan_info info = {
+ .sw_index = dp->ds->index,
+ .port = dp->index,
+ .vlan = vlan,
+ };
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ int err;
+
+ err = dsa_port_notify(dp, DSA_NOTIFIER_HOST_VLAN_DEL, &info);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ vlan_vid_del(cpu_dp->master, htons(ETH_P_8021Q), vlan->vid);
+
+ return err;
+}
+
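Both helpers treat -EOPNOTSUPP from the cross-chip notifier as "no hardware support" rather than failure, and still program the VID on the DSA master so software VLAN filtering keeps working. That optional-offload error policy, reduced to a hedged sketch (hypothetical function names):

#include <errno.h>

static int hw_vlan_add(int vid) { (void)vid; return -EOPNOTSUPP; }
static int sw_vlan_add(int vid) { (void)vid; return 0; }

/* Offload is best-effort: only a real error aborts; "no hardware
 * support" falls through to the software path. */
static int vlan_add(int vid)
{
	int err = hw_vlan_add(vid);

	if (err && err != -EOPNOTSUPP)
		return err;

	return sw_vlan_add(vid);
}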
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct switchdev_obj_port_vlan vlan;
+ struct switchdev_obj_port_vlan *vlan;
int err;
if (dsa_port_skip_vlan_configuration(dp)) {
return 0;
}
- vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
* the same VID.
*/
if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
- err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
+ err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan);
rcu_read_unlock();
if (err) {
NL_SET_ERR_MSG_MOD(extack,
}
}
- err = dsa_port_vlan_add(dp, &vlan, extack);
- if (err)
- return err;
+ return dsa_port_vlan_add(dp, vlan, extack);
+}
+
+/* Offload a VLAN installed on the bridge or on a foreign interface by
+ * installing it as a VLAN towards the CPU port.
+ */
+static int dsa_slave_host_vlan_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan vlan;
+
+ /* Do nothing if this is a software bridge */
+ if (!dp->bridge)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp)) {
+ NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN");
+ return 0;
+ }
- /* We need the dedicated CPU port to be a member of the VLAN as well.
- * Even though drivers often handle CPU membership in special ways,
+ vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ /* Even though drivers often handle CPU membership in special ways,
* it doesn't make sense to program a PVID, so clear this flag.
*/
vlan.flags &= ~BRIDGE_VLAN_INFO_PVID;
- err = dsa_port_vlan_add(dp->cpu_dp, &vlan, extack);
- if (err)
- return err;
-
- return vlan_vid_add(master, htons(ETH_P_8021Q), vlan.vid);
+ return dsa_port_host_vlan_add(dp, &vlan, extack);
}
static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- return -EOPNOTSUPP;
-
- err = dsa_slave_vlan_add(dev, obj, extack);
+ if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ err = dsa_slave_vlan_add(dev, obj, extack);
+ else
+ err = dsa_slave_host_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
static int dsa_slave_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan *vlan;
- int err;
if (dsa_port_skip_vlan_configuration(dp))
return 0;
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- /* Do not deprogram the CPU port as it may be shared with other user
- * ports which can be members of this VLAN as well.
- */
- err = dsa_port_vlan_del(dp, vlan);
- if (err)
- return err;
+ return dsa_port_vlan_del(dp, vlan);
+}
- vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid);
+static int dsa_slave_host_vlan_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan *vlan;
- return 0;
+ /* Do nothing if this is a software bridge */
+ if (!dp->bridge)
+ return -EOPNOTSUPP;
+
+ if (dsa_port_skip_vlan_configuration(dp))
+ return 0;
+
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
+ return dsa_port_host_vlan_del(dp, vlan);
}
static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev))
- return -EOPNOTSUPP;
-
- err = dsa_slave_vlan_del(dev, obj);
+ if (dsa_port_offloads_bridge_port(dp, obj->orig_dev))
+ err = dsa_slave_vlan_del(dev, obj);
+ else
+ err = dsa_slave_host_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return err;
}
-static int dsa_slave_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
- struct dsa_switch *ds = dp->ds;
- struct dsa_switch_tree *dst = ds->dst;
-
- /* For non-legacy ports, devlink is used and it takes
- * care of the name generation. This ndo implementation
- * should be removed with legacy support.
- */
- if (dp->ds->devlink)
- return -EOPNOTSUPP;
-
- ppid->id_len = sizeof(dst->index);
- memcpy(&ppid->id, &dst->index, ppid->id_len);
-
- return 0;
-}
-
static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev,
struct sk_buff *skb)
{
}
#endif
-static int dsa_slave_get_phys_port_name(struct net_device *dev,
- char *name, size_t len)
-{
- struct dsa_port *dp = dsa_slave_to_port(dev);
-
- /* For non-legacy ports, devlink is used and it takes
- * care of the name generation. This ndo implementation
- * should be removed with legacy support.
- */
- if (dp->ds->devlink)
- return -EOPNOTSUPP;
-
- if (snprintf(name, len, "p%d", dp->index) >= len)
- return -EINVAL;
-
- return 0;
-}
-
static struct dsa_mall_tc_entry *
dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie)
{
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
}
/* And CPU port... */
- ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &extack);
+ ret = dsa_port_host_vlan_add(dp, &vlan, &extack);
if (ret) {
if (extack._msg)
netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index,
return ret;
}
- return vlan_vid_add(master, proto, vid);
+ return 0;
}
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
- struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan = {
.vid = vid,
};
int err;
- /* Do not deprogram the CPU port as it may be shared with other user
- * ports which can be members of this VLAN as well.
- */
err = dsa_port_vlan_del(dp, &vlan);
if (err)
return err;
- vlan_vid_del(master, proto, vid);
-
- return 0;
+ return dsa_port_host_vlan_del(dp, &vlan);
}
static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- return dp->ds->devlink ? &dp->devlink_port : NULL;
+ return &dp->devlink_port;
}
static void dsa_slave_get_stats64(struct net_device *dev,
.ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup,
.ndo_poll_controller = dsa_slave_poll_controller,
#endif
- .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
.ndo_setup_tc = dsa_slave_setup_tc,
.ndo_get_stats64 = dsa_slave_get_stats64,
- .ndo_get_port_parent_id = dsa_slave_get_port_parent_id,
.ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid,
.ndo_get_devlink_port = dsa_slave_get_devlink_port,
slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
- struct netdev_queue *txq,
- void *_unused)
-{
- lockdep_set_class(&txq->_xmit_lock,
- &dsa_slave_netdev_xmit_lock_key);
-}
-
int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
- netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
- NULL);
-
SET_NETDEV_DEV(slave_dev, port->ds->dev);
slave_dev->dev.of_node = port->dn;
slave_dev->vlan_features = master->vlan_features;
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = switchdev_handle_port_obj_add(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_obj_add);
+ err = switchdev_handle_port_obj_add_foreign(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_foreign_dev_check,
+ dsa_slave_port_obj_add);
return notifier_from_errno(err);
case SWITCHDEV_PORT_OBJ_DEL:
- err = switchdev_handle_port_obj_del(dev, ptr,
- dsa_slave_dev_check,
- dsa_slave_port_obj_del);
+ err = switchdev_handle_port_obj_del_foreign(dev, ptr,
+ dsa_slave_dev_check,
+ dsa_foreign_dev_check,
+ dsa_slave_port_obj_del);
return notifier_from_errno(err);
case SWITCHDEV_PORT_ATTR_SET:
err = switchdev_handle_port_attr_set(dev, ptr,
return err;
}
+/* Port VLANs match on the targeted port and on all DSA ports */
static bool dsa_port_vlan_match(struct dsa_port *dp,
struct dsa_notifier_vlan_info *info)
{
return false;
}
+/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
+ * (upstream and downstream) of that switch and its upstream switches.
+ */
+static bool dsa_port_host_vlan_match(struct dsa_port *dp,
+ struct dsa_notifier_vlan_info *info)
+{
+ struct dsa_port *targeted_dp, *cpu_dp;
+ struct dsa_switch *targeted_ds;
+
+ targeted_ds = dsa_switch_find(dp->ds->dst->index, info->sw_index);
+ targeted_dp = dsa_to_port(targeted_ds, info->port);
+ cpu_dp = targeted_dp->cpu_dp;
+
+ if (dsa_switch_is_upstream_of(dp->ds, targeted_ds))
+ return dsa_port_is_dsa(dp) || dp == cpu_dp;
+
+ return false;
+}
+
+static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_vlan *v;
+
+ list_for_each_entry(v, vlan_list, list)
+ if (v->vid == vlan->vid)
+ return v;
+
+ return NULL;
+}
+
+static int dsa_port_do_vlan_add(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ struct dsa_vlan *v;
+ int err = 0;
+
+ /* No need to bother with refcounting for user ports. */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_vlan_add(ds, port, vlan, extack);
+
+ /* No need to propagate on shared ports the existing VLANs that were
+ * re-notified after just the flags have changed. This would cause a
+ * refcount bump which we need to avoid, since it unbalances the
+ * additions with the deletions.
+ */
+ if (vlan->changed)
+ return 0;
+
+ mutex_lock(&dp->vlans_lock);
+
+ v = dsa_vlan_find(&dp->vlans, vlan);
+ if (v) {
+ refcount_inc(&v->refcount);
+ goto out;
+ }
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = ds->ops->port_vlan_add(ds, port, vlan, extack);
+ if (err) {
+ kfree(v);
+ goto out;
+ }
+
+ v->vid = vlan->vid;
+ refcount_set(&v->refcount, 1);
+ list_add_tail(&v->list, &dp->vlans);
+
+out:
+ mutex_unlock(&dp->vlans_lock);
+
+ return err;
+}
+
+static int dsa_port_do_vlan_del(struct dsa_port *dp,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ struct dsa_vlan *v;
+ int err = 0;
+
+ /* No need to bother with refcounting for user ports */
+ if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
+ return ds->ops->port_vlan_del(ds, port, vlan);
+
+ mutex_lock(&dp->vlans_lock);
+
+ v = dsa_vlan_find(&dp->vlans, vlan);
+ if (!v) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ if (!refcount_dec_and_test(&v->refcount))
+ goto out;
+
+ err = ds->ops->port_vlan_del(ds, port, vlan);
+ if (err) {
+ refcount_set(&v->refcount, 1);
+ goto out;
+ }
+
+ list_del(&v->list);
+ kfree(v);
+
+out:
+ mutex_unlock(&dp->vlans_lock);
+
+ return err;
+}
+
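dsa_port_do_vlan_add()/dsa_port_do_vlan_del() introduce per-port reference counting so that shared (CPU and DSA) ports program a VID into hardware only on the first add and remove it only on the last delete. The same bookkeeping in a self-contained user-space sketch, with a plain singly-linked list and counter standing in for the kernel's list_head/refcount_t (all names illustrative):

#include <stdlib.h>

struct vlan {
	int vid;
	int refcount;
	struct vlan *next;
};

static struct vlan *vlan_find(struct vlan *head, int vid)
{
	for (; head; head = head->next)
		if (head->vid == vid)
			return head;
	return NULL;
}

/* First add programs the hardware; later adds only bump the count. */
static int vlan_add(struct vlan **head, int vid, int (*hw_add)(int))
{
	struct vlan *v = vlan_find(*head, vid);
	int err;

	if (v) {
		v->refcount++;
		return 0;
	}
	v = calloc(1, sizeof(*v));
	if (!v)
		return -1;
	err = hw_add(vid);
	if (err) {
		free(v);
		return err;
	}
	v->vid = vid;
	v->refcount = 1;
	v->next = *head;
	*head = v;
	return 0;
}

/* Last delete removes the hardware entry; earlier ones drop a ref. */
static int vlan_del(struct vlan **head, int vid, int (*hw_del)(int))
{
	struct vlan **pp, *v = vlan_find(*head, vid);
	int err;

	if (!v)
		return -1;
	if (--v->refcount > 0)
		return 0;
	err = hw_del(vid);
	if (err) {
		v->refcount = 1;	/* roll back, as the kernel code does */
		return err;
	}
	for (pp = head; *pp != v; pp = &(*pp)->next)
		;
	*pp = v->next;
	free(v);
	return 0;
}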
static int dsa_switch_vlan_add(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
dsa_switch_for_each_port(dp, ds) {
if (dsa_port_vlan_match(dp, info)) {
- err = ds->ops->port_vlan_add(ds, dp->index, info->vlan,
- info->extack);
+ err = dsa_port_do_vlan_add(dp, info->vlan,
+ info->extack);
if (err)
return err;
}
static int dsa_switch_vlan_del(struct dsa_switch *ds,
struct dsa_notifier_vlan_info *info)
{
+ struct dsa_port *dp;
+ int err;
+
if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
- if (ds->index == info->sw_index)
- return ds->ops->port_vlan_del(ds, info->port, info->vlan);
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_vlan_match(dp, info)) {
+ err = dsa_port_do_vlan_del(dp, info->vlan);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ struct dsa_port *dp;
+ int err;
+
+ if (!ds->ops->port_vlan_add)
+ return -EOPNOTSUPP;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_vlan_match(dp, info)) {
+ err = dsa_port_do_vlan_add(dp, info->vlan,
+ info->extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
+ struct dsa_notifier_vlan_info *info)
+{
+ struct dsa_port *dp;
+ int err;
+
+ if (!ds->ops->port_vlan_del)
+ return -EOPNOTSUPP;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (dsa_port_host_vlan_match(dp, info)) {
+ err = dsa_port_do_vlan_del(dp, info->vlan);
+ if (err)
+ return err;
+ }
+ }
- /* Do not deprogram the DSA links as they may be used as conduit
- * for other VLAN members in the fabric.
- */
return 0;
}
case DSA_NOTIFIER_VLAN_DEL:
err = dsa_switch_vlan_del(ds, info);
break;
+ case DSA_NOTIFIER_HOST_VLAN_ADD:
+ err = dsa_switch_host_vlan_add(ds, info);
+ break;
+ case DSA_NOTIFIER_HOST_VLAN_DEL:
+ err = dsa_switch_host_vlan_del(ds, info);
+ break;
case DSA_NOTIFIER_MTU:
err = dsa_switch_mtu(ds, info);
break;
{
u16 vid, tci;
- skb_push_rcsum(skb, ETH_HLEN);
if (skb_vlan_tag_present(skb)) {
tci = skb_vlan_tag_get(skb);
__vlan_hwaccel_clear_tag(skb);
} else {
+ skb_push_rcsum(skb, ETH_HLEN);
__skb_vlan_pop(skb, &tci);
+ skb_pull_rcsum(skb, ETH_HLEN);
}
- skb_pull_rcsum(skb, ETH_HLEN);
vid = tci & VLAN_VID_MASK;
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev)
{
- __be16 *lan9303_tag;
u16 lan9303_tag1;
unsigned int source_port;
return NULL;
}
- lan9303_tag = dsa_etype_header_pos_rx(skb);
-
- if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
- dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
- return NULL;
+ if (skb_vlan_tag_present(skb)) {
+ lan9303_tag1 = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ skb_push_rcsum(skb, ETH_HLEN);
+ __skb_vlan_pop(skb, &lan9303_tag1);
+ skb_pull_rcsum(skb, ETH_HLEN);
}
- lan9303_tag1 = ntohs(lan9303_tag[1]);
source_port = lan9303_tag1 & 0x3;
skb->dev = dsa_master_find_slave(dev, 0, source_port);
return NULL;
}
- /* remove the special VLAN tag between the MAC addresses
- * and the current ethertype field.
- */
- skb_pull_rcsum(skb, 2 + 2);
-
- dsa_strip_etype_header(skb, LAN9303_TAG_LEN);
-
if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
dsa_default_offload_fwd_mark(skb);
if (!xmit_work_fn || !xmit_worker)
return NULL;
+ /* PTP over IP packets need UDP checksumming. We may have inherited
+ * NETIF_F_HW_CSUM from the DSA master, but these packets are not sent
+ * through the DSA master, so calculate the checksum here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
+ return NULL;
+
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
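The comment explains the trap being fixed: an skb marked CHECKSUM_PARTIAL expects its egress device to finalize the checksum, but these PTP frames bypass the DSA master, so skb_checksum_help() must resolve it in software first. For reference, the arithmetic behind such software checksumming is the RFC 1071 ones'-complement sum; a minimal, self-contained version (illustrative only, not the kernel helper):

#include <stdint.h>
#include <stddef.h>

/* RFC 1071 Internet checksum over a buffer, treating the data as
 * big-endian 16-bit words and folding carries back in. */
static uint16_t inet_checksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)				/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}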
if (net->ipv4.fib_has_custom_local_routes ||
fib4_has_custom_rules(net))
goto full_check;
+	/* A source address owned by this net namespace (container) is
+	 * regarded as a martian source; the same host address owned by
+	 * a different container is not.
+ */
if (inet_lookup_ifaddr_rcu(net, src))
return -EINVAL;
u8 fa_slen;
u32 tb_id;
s16 fa_default;
- u8 offload:1,
- trap:1,
- offload_failed:1,
- unused:5;
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
struct rcu_head rcu;
};
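Turning the offload/trap/offload_failed bitfields into individual bytes is what legitimizes the READ_ONCE()/WRITE_ONCE() annotations that follow: bitfields share a storage unit, so writing one flag is really a read-modify-write of its neighbours and can never be made tear-free by marking the access. A sketch of the two layouts (illustrative):

/* Racy under concurrent writers: updating 'trap' rewrites the whole
 * shared byte, clobbering a parallel update of 'offload'. */
struct flags_packed {
	unsigned char offload:1, trap:1, offload_failed:1, unused:5;
};

/* Safe for independent lockless updates: each flag owns its byte,
 * so a marked store touches nothing else. */
struct flags_split {
	unsigned char offload;
	unsigned char trap;
	unsigned char offload_failed;
};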
fri.dst_len = dst_len;
fri.tos = inet_dscp_to_dsfield(fa->fa_dscp);
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb, info->portid, seq, event, &fri, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
if (!fa_match)
goto out;
- if (fa_match->offload == fri->offload && fa_match->trap == fri->trap &&
- fa_match->offload_failed == fri->offload_failed)
+ /* These are paired with the WRITE_ONCE() happening in this function.
+ * The reason is that we are only protected by RCU at this point.
+ */
+ if (READ_ONCE(fa_match->offload) == fri->offload &&
+ READ_ONCE(fa_match->trap) == fri->trap &&
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload = fri->offload;
- fa_match->trap = fri->trap;
+ WRITE_ONCE(fa_match->offload, fri->offload);
+ WRITE_ONCE(fa_match->trap, fri->trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv4.sysctl_fib_notify_on_flag_change == 2 &&
- fa_match->offload_failed == fri->offload_failed)
+ READ_ONCE(fa_match->offload_failed) == fri->offload_failed)
goto out;
- fa_match->offload_failed = fri->offload_failed;
+ WRITE_ONCE(fa_match->offload_failed, fri->offload_failed);
if (!net->ipv4.sysctl_fib_notify_on_flag_change)
goto out;
fri.dst_len = KEYLENGTH - fa->fa_slen;
fri.tos = inet_dscp_to_dsfield(fa->fa_dscp);
fri.type = fa->fa_type;
- fri.offload = fa->offload;
- fri.trap = fa->trap;
- fri.offload_failed = fa->offload_failed;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed = READ_ONCE(fa->offload_failed);
err = fib_dump_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
return 0;
err2:
+ rtnl_lock();
ipmr_free_table(mrt);
+ rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;
struct sock *sk = NULL;
struct inet_sock *isk;
struct hlist_nulls_node *hnode;
- int dif = skb->dev->ifindex;
+ int dif, sdif;
if (skb->protocol == htons(ETH_P_IP)) {
+ dif = inet_iif(skb);
+ sdif = inet_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
(int)ident, &ip_hdr(skb)->daddr, dif);
#if IS_ENABLED(CONFIG_IPV6)
} else if (skb->protocol == htons(ETH_P_IPV6)) {
+ dif = inet6_iif(skb);
+ sdif = inet6_sdif(skb);
pr_debug("try to find: num = %d, daddr = %pI6c, dif = %d\n",
(int)ident, &ipv6_hdr(skb)->daddr, dif);
#endif
+ } else {
+ pr_err("ping: protocol(%x) is not supported\n", ntohs(skb->protocol));
+ return NULL;
}
read_lock_bh(&ping_table.lock);
}
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != inet_sdif(skb))
+ sk->sk_bound_dev_if != sdif)
continue;
sock_hold(sk);
struct uncached_list {
spinlock_t lock;
struct list_head head;
+ struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
struct uncached_list *ul = rt->rt_uncached_list;
spin_lock_bh(&ul->lock);
- list_del(&rt->rt_uncached);
+ list_del_init(&rt->rt_uncached);
spin_unlock_bh(&ul->lock);
}
}
void rt_flush_dev(struct net_device *dev)
{
- struct rtable *rt;
+ struct rtable *rt, *safe;
int cpu;
for_each_possible_cpu(cpu) {
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
+ if (list_empty(&ul->head))
+ continue;
+
spin_lock_bh(&ul->lock);
- list_for_each_entry(rt, &ul->head, rt_uncached) {
+ list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
if (rt->dst.dev != dev)
continue;
rt->dst.dev = blackhole_netdev;
dev_replace_track(dev, blackhole_netdev,
&rt->dst.dev_tracker,
GFP_ATOMIC);
+ list_move(&rt->rt_uncached, &ul->quarantine);
}
spin_unlock_bh(&ul->lock);
}
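rt_flush_dev() now parks entries it has pointed at blackhole_netdev on a per-cpu quarantine list instead of leaving them on the live list, so later flushes skip them while list_del_init() in the free path still finds them on a valid list. The essence of the move-to-quarantine pattern, in a self-contained sketch with a toy singly-linked list (illustrative names):

#include <stddef.h>

struct rt {
	int dev_id;
	struct rt *next;
};

/* Move every entry bound to 'dev' from the live list to 'quarantine',
 * so subsequent flushes skip them but teardown can still free them. */
static void flush_dev(struct rt **head, struct rt **quarantine, int dev)
{
	struct rt **pp = head;

	while (*pp) {
		struct rt *rt = *pp;

		if (rt->dev_id != dev) {
			pp = &rt->next;
			continue;
		}
		*pp = rt->next;			/* unlink from live list */
		rt->dev_id = -1;		/* stand-in for the blackhole redirect */
		rt->next = *quarantine;		/* push onto quarantine */
		*quarantine = rt;
	}
}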
fa->fa_dscp == inet_dsfield_to_dscp(fri.tos) &&
fa->fa_info == res.fi &&
fa->fa_type == fri.type) {
- fri.offload = fa->offload;
- fri.trap = fa->trap;
+ fri.offload = READ_ONCE(fa->offload);
+ fri.trap = READ_ONCE(fa->trap);
break;
}
}
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
+ INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
}
}
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+ if (unlikely(skb_zcopy_pure(skb))) {
+ u32 extra = skb->truesize -
+ SKB_TRUESIZE(skb_end_offset(skb));
+
+ if (!sk_wmem_schedule(sk, extra))
+ return -ENOMEM;
+
+ sk_mem_charge(sk, extra);
+ skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+ }
+ return 0;
+}
+
static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
struct page *page, int offset, size_t *size)
{
tcp_mark_push(tp, skb);
goto new_segment;
}
- if (!sk_wmem_schedule(sk, copy))
+ if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
return NULL;
if (can_coalesce) {
copy = min_t(int, copy, pfrag->size - pfrag->offset);
- /* skb changing from pure zc to mixed, must charge zc */
- if (unlikely(skb_zcopy_pure(skb))) {
- u32 extra = skb->truesize -
- SKB_TRUESIZE(skb_end_offset(skb));
-
- if (!sk_wmem_schedule(sk, extra))
- goto wait_for_space;
-
- sk_mem_charge(sk, extra);
- skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
- }
-
- if (!sk_wmem_schedule(sk, copy))
+ if (tcp_downgrade_zcopy_pure(sk, skb) ||
+ !sk_wmem_schedule(sk, copy))
goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
ireq->ir_mark = inet_request_mark(sk, skb);
#if IS_ENABLED(CONFIG_SMC)
- ireq->smc_ok = rx_opt->smc_ok;
+ ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
+ tcp_sk(sk)->smc_hs_congested(sk));
#endif
}
ASSERT_RTNL();
- if (dev->mtu < IPV6_MIN_MTU)
+ if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
return ERR_PTR(-EINVAL);
ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
return ERR_PTR(err);
}
- if (snmp6_register_dev(ndev) < 0) {
- netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name);
- goto err_release;
+ if (dev != blackhole_netdev) {
+ if (snmp6_register_dev(ndev) < 0) {
+ netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
+ __func__, dev->name);
+ goto err_release;
+ }
}
-
/* One reference from device. */
refcount_set(&ndev->refcnt, 1);
ipv6_mc_init_dev(ndev);
ndev->tstamp = jiffies;
- err = addrconf_sysctl_register(ndev);
- if (err) {
- ipv6_mc_destroy_dev(ndev);
- snmp6_unregister_dev(ndev);
- goto err_release;
+ if (dev != blackhole_netdev) {
+ err = addrconf_sysctl_register(ndev);
+ if (err) {
+ ipv6_mc_destroy_dev(ndev);
+ snmp6_unregister_dev(ndev);
+ goto err_release;
+ }
}
/* protected by rtnl_lock */
rcu_assign_pointer(dev->ip6_ptr, ndev);
- /* Join interface-local all-node multicast group */
- ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
+ if (dev != blackhole_netdev) {
+ /* Join interface-local all-node multicast group */
+ ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
- /* Join all-node multicast group */
- ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
-
- /* Join all-router multicast group if forwarding is set */
- if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
- ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+ /* Join all-node multicast group */
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
+ /* Join all-router multicast group if forwarding is set */
+ if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
+ ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+ }
return ndev;
err_release:
}
EXPORT_SYMBOL(ipv6_dev_get_saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags)
+static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ u32 banned_flags)
{
struct inet6_ifaddr *ifp;
int err = -EADDRNOTAVAIL;
kfree(net->ipv6.devconf_all);
net->ipv6.devconf_all = NULL;
- cancel_delayed_work(&net->ipv6.addr_chk_work);
+ cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
/*
* Check hash table, then free it.
*/
goto out_nowq;
}
- /* The addrconf netdev notifier requires that loopback_dev
- * has it's ipv6 private information allocated and setup
- * before it can bring up and give link-local addresses
- * to other devices which are up.
- *
- * Unfortunately, loopback_dev is not necessarily the first
- * entry in the global dev_base list of net devices. In fact,
- * it is likely to be the very last entry on that list.
- * So this causes the notifier registry below to try and
- * give link-local addresses to all devices besides loopback_dev
- * first, then loopback_dev, which cases all the non-loopback_dev
- * devices to fail to get a link-local address.
- *
- * So, as a temporary fix, allocate the ipv6 structure for
- * loopback_dev first by hand.
- * Longer term, all of the dependencies ipv6 has upon the loopback
- * device and it being up should be removed.
- */
rtnl_lock();
- idev = ipv6_add_dev(init_net.loopback_dev);
+ idev = ipv6_add_dev(blackhole_netdev);
rtnl_unlock();
if (IS_ERR(idev)) {
err = PTR_ERR(idev);
err = -EINVAL;
goto done;
}
- if (fl_shared_exclusive(fl) || fl->opt)
+ if (fl_shared_exclusive(fl) || fl->opt) {
+ WRITE_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl, 1);
static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
+ }
return fl;
done:
if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
+ sk->sk_protocol == IPPROTO_ICMPV6 ||
sk->sk_protocol == IPPROTO_RAW)) {
ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
sizeof(struct ipv6hdr));
return 0;
err2:
+ rtnl_lock();
ip6mr_free_table(mrt);
+ rtnl_unlock();
err1:
fib_rules_unregister(ops);
return err;
skb_reserve(skb, hlen);
skb_tailroom_reserve(skb, mtu, tlen);
- if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
/* <draft-ietf-magma-mld-source-05.txt>:
* use unspecified address as the source address
* when a valid link-local address is not available.
return;
}
neigh->flags |= NTF_ROUTER;
- } else if (rt) {
+ } else if (rt && IPV6_EXTRACT_PREF(rt->fib6_flags) != pref) {
+ struct nl_info nlinfo = {
+ .nl_net = net,
+ };
rt->fib6_flags = (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, &nlinfo, NLM_F_REPLACE);
}
if (rt)
ipc6.sockc.tsflags = sk->sk_tsflags;
ipc6.sockc.mark = sk->sk_mark;
- err = sock_cmsg_send(sk, msg, &ipc6.sockc);
- if (err)
- return err;
+ if (msg->msg_controllen) {
+ struct ipv6_txoptions opt = {};
+
+ opt.tot_len = sizeof(opt);
+ ipc6.opt = &opt;
- /* TODO: use ip6_datagram_send_ctl to get options from cmsg */
+ err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
+ if (err < 0)
+ return err;
+
+		/* Changes to txoptions and flow info are not implemented yet.
+ * Drop the options, fl6 is wiped below.
+ */
+ ipc6.opt = NULL;
+ }
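With ip6_datagram_send_ctl() handling msg_control, ICMPv6 ping sockets now honour standard IPv6 ancillary data such as a per-message hop limit (consumed via ipc6.hlimit just below). From user space that is the usual cmsg construction; a hedged example, with socket setup elided and cbuf assumed to be at least CMSG_SPACE(sizeof(int)) bytes:

#include <stddef.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Attach IPV6_HOPLIMIT ancillary data to a message before sendmsg(). */
static void set_hoplimit(struct msghdr *msg, char *cbuf, size_t cbuflen,
			 int hoplimit)
{
	struct cmsghdr *cmsg;

	memset(cbuf, 0, cbuflen);
	msg->msg_control = cbuf;
	msg->msg_controllen = cbuflen;

	cmsg = CMSG_FIRSTHDR(msg);
	cmsg->cmsg_level = IPPROTO_IPV6;
	cmsg->cmsg_type = IPV6_HOPLIMIT;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &hoplimit, sizeof(int));

	msg->msg_controllen = cmsg->cmsg_len;
}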
memset(&fl6, 0, sizeof(fl6));
pfh.wcheck = 0;
pfh.family = AF_INET6;
- ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
+ if (ipc6.hlimit < 0)
+ ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
lock_sock(sk);
err = ip6_append_data(sk, ping_getfrag, &pfh, len,
struct uncached_list {
spinlock_t lock;
struct list_head head;
+ struct list_head quarantine;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
{
if (!list_empty(&rt->rt6i_uncached)) {
struct uncached_list *ul = rt->rt6i_uncached_list;
- struct net *net = dev_net(rt->dst.dev);
spin_lock_bh(&ul->lock);
- list_del(&rt->rt6i_uncached);
- atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
+ list_del_init(&rt->rt6i_uncached);
spin_unlock_bh(&ul->lock);
}
}
-static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
+static void rt6_uncached_list_flush_dev(struct net_device *dev)
{
- struct net_device *loopback_dev = net->loopback_dev;
int cpu;
- if (dev == loopback_dev)
- return;
-
for_each_possible_cpu(cpu) {
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
- struct rt6_info *rt;
+ struct rt6_info *rt, *safe;
+
+ if (list_empty(&ul->head))
+ continue;
spin_lock_bh(&ul->lock);
- list_for_each_entry(rt, &ul->head, rt6i_uncached) {
+ list_for_each_entry_safe(rt, safe, &ul->head, rt6i_uncached) {
struct inet6_dev *rt_idev = rt->rt6i_idev;
struct net_device *rt_dev = rt->dst.dev;
+ bool handled = false;
if (rt_idev->dev == dev) {
- rt->rt6i_idev = in6_dev_get(loopback_dev);
+ rt->rt6i_idev = in6_dev_get(blackhole_netdev);
in6_dev_put(rt_idev);
+ handled = true;
}
if (rt_dev == dev) {
dev_replace_track(rt_dev, blackhole_netdev,
&rt->dst.dev_tracker,
GFP_ATOMIC);
+ handled = true;
}
+ if (handled)
+ list_move(&rt->rt6i_uncached,
+ &ul->quarantine);
}
spin_unlock_bh(&ul->lock);
}
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;
- struct net_device *loopback_dev =
- dev_net(dev)->loopback_dev;
- if (idev && idev->dev != loopback_dev) {
- struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
- if (loopback_idev) {
- rt->rt6i_idev = loopback_idev;
+ if (idev && idev->dev != blackhole_netdev) {
+ struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
+
+ if (blackhole_idev) {
+ rt->rt6i_idev = blackhole_idev;
in6_dev_put(idev);
}
}
* if caller sets RT6_LOOKUP_F_DST_NOREF flag.
*/
rt6_uncached_list_add(rt);
- atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
rcu_read_unlock();
return rt;
* do proper release of the net_device
*/
rt6_uncached_list_add(rt);
- atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
rt6_sync_down_dev(dev, event);
- rt6_uncached_list_flush_dev(dev_net(dev), dev);
+ rt6_uncached_list_flush_dev(dev);
neigh_ifdown(&nd_tbl, dev);
}
err = -EINVAL;
rtm = nlmsg_data(nlh);
+ if (rtm->rtm_tos) {
+ NL_SET_ERR_MSG(extack,
+ "Invalid dsfield (tos): option not available for IPv6");
+ goto errout;
+ }
+
*cfg = (struct fib6_config){
.fc_table = rtm->rtm_table,
.fc_dst_len = rtm->rtm_dst_len,
}
if (!dst) {
- if (rt->offload)
+ if (READ_ONCE(rt->offload))
rtm->rtm_flags |= RTM_F_OFFLOAD;
- if (rt->trap)
+ if (READ_ONCE(rt->trap))
rtm->rtm_flags |= RTM_F_TRAP;
- if (rt->offload_failed)
+ if (READ_ONCE(rt->offload_failed))
rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
}
struct sk_buff *skb;
int err;
- if (f6i->offload == offload && f6i->trap == trap &&
- f6i->offload_failed == offload_failed)
+ if (READ_ONCE(f6i->offload) == offload &&
+ READ_ONCE(f6i->trap) == trap &&
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload = offload;
- f6i->trap = trap;
+ WRITE_ONCE(f6i->offload, offload);
+ WRITE_ONCE(f6i->trap, trap);
/* 2 means send notifications only if offload_failed was changed. */
if (net->ipv6.sysctl.fib_notify_on_flag_change == 2 &&
- f6i->offload_failed == offload_failed)
+ READ_ONCE(f6i->offload_failed) == offload_failed)
return;
- f6i->offload_failed = offload_failed;
+ WRITE_ONCE(f6i->offload_failed, offload_failed);
if (!rcu_access_pointer(f6i->fib6_node))
/* The route was removed from the tree, do not send
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
+ INIT_LIST_HEAD(&ul->quarantine);
spin_lock_init(&ul->lock);
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
const struct in6_addr *saddr, *daddr;
struct net *net = dev_net(skb->dev);
struct udphdr *uh;
return udp6_unicast_rcv_skb(sk, skb, uh);
}
+ reason = SKB_DROP_REASON_NO_SOCKET;
+
if (!uh->check)
goto report_csum_error;
__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return 0;
short_packet:
+ if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
+ reason = SKB_DROP_REASON_PKT_TOO_SMALL;
net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr, ntohs(uh->source),
report_csum_error:
udp6_csum_zero_error(skb);
csum_error:
+ if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
+ reason = SKB_DROP_REASON_UDP_CSUM;
__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
return 0;
}
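The conversion to kfree_skb_reason() follows a fixed shape: start from SKB_DROP_REASON_NOT_SPECIFIED, refine the reason at each failure label, and pass it to the single free call so tracepoints can distinguish the drops. Reduced to a sketch (illustrative enum and names):

#include <stdio.h>

enum drop_reason {
	DROP_NOT_SPECIFIED,
	DROP_NO_SOCKET,
	DROP_PKT_TOO_SMALL,
	DROP_CSUM,
};

static void free_pkt_reason(void *pkt, enum drop_reason reason)
{
	/* stand-in for kfree_skb_reason(): the reason feeds tracing */
	printf("dropped packet %p, reason %d\n", pkt, reason);
}

static int rcv(void *pkt, int len, int csum_ok, int have_socket)
{
	enum drop_reason reason = DROP_NOT_SPECIFIED;

	if (len < 8) {				/* arbitrary minimum for the sketch */
		reason = DROP_PKT_TOO_SMALL;
		goto drop;
	}
	if (!csum_ok) {
		reason = DROP_CSUM;
		goto drop;
	}
	if (!have_socket) {
		reason = DROP_NO_SOCKET;
		goto drop;
	}
	return 0;
drop:
	free_pkt_reason(pkt, reason);
	return -1;
}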
xdst->u.rt6.rt6i_src = rt->rt6i_src;
INIT_LIST_HEAD(&xdst->u.rt6.rt6i_uncached);
rt6_uncached_list_add(&xdst->u.rt6);
- atomic_inc(&dev_net(dev)->ipv6.rt6_stats->fib_rt_uncache);
return 0;
}
// SPDX-License-Identifier: ISC
/*
* Copyright (C) 2019 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2021 Intel Corporation
*/
#include <net/mac80211.h>
#define IEEE80211_VHT_STREAM_GROUPS 8 /* BW(=4) * SGI(=2) */
#define IEEE80211_HE_MAX_STREAMS 8
-#define IEEE80211_HE_STREAM_GROUPS 12 /* BW(=4) * GI(=3) */
#define IEEE80211_HT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_HT_STREAM_GROUPS)
#define IEEE80211_VHT_GROUPS_NB (IEEE80211_MAX_STREAMS * \
IEEE80211_VHT_STREAM_GROUPS)
-#define IEEE80211_HE_GROUPS_NB (IEEE80211_HE_MAX_STREAMS * \
- IEEE80211_HE_STREAM_GROUPS)
-#define IEEE80211_GROUPS_NB (IEEE80211_HT_GROUPS_NB + \
- IEEE80211_VHT_GROUPS_NB + \
- IEEE80211_HE_GROUPS_NB)
#define IEEE80211_HT_GROUP_0 0
#define IEEE80211_VHT_GROUP_0 (IEEE80211_HT_GROUP_0 + IEEE80211_HT_GROUPS_NB)
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
struct ieee80211_rx_status stat;
- struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate;
+ struct ieee80211_tx_rate *tx_rate = &sta->tx_stats.last_rate;
struct rate_info *ri = &sta->tx_stats.last_rate_info;
u32 duration, overhead;
u8 agg_shift;
- if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ if (ieee80211_fill_rx_status(&stat, hw, tx_rate, ri, band, len))
return 0;
if (stat.encoding == RX_ENC_LEGACY || !ampdu)
.llseek = generic_file_llseek, \
};
+#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
#define DEBUGFS_STATS_ADD(name) \
debugfs_create_u32(#name, 0400, statsd, &local->name);
+#endif
#define DEBUGFS_DEVSTATS_ADD(name) \
debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2021 Intel Corporation
*/
#include <linux/kobject.h>
return mac80211_format_buffer(userbuf, count, ppos, \
format_string, key->prop); \
}
-#define KEY_READ_D(name) KEY_READ(name, name, "%d\n")
#define KEY_READ_X(name) KEY_READ(name, name, "0x%x\n")
#define KEY_OPS(name) \
/*
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020 Intel Corporation
+ * Copyright (C) 2020-2021 Intel Corporation
*/
#include <linux/kernel.h>
IEEE80211_IF_FMT(name, field, "%#x\n")
#define IEEE80211_IF_FMT_LHEX(name, field) \
IEEE80211_IF_FMT(name, field, "%#lx\n")
-#define IEEE80211_IF_FMT_SIZE(name, field) \
- IEEE80211_IF_FMT(name, field, "%zd\n")
#define IEEE80211_IF_FMT_HEXARRAY(name, field) \
static ssize_t ieee80211_if_fmt_##name( \
u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
const struct cfg80211_chan_def *chandef);
u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata, u8 iftype);
-u8 *ieee80211_ie_build_he_cap(u8 *pos,
+u8 *ieee80211_ie_build_he_cap(u32 disable_flags, u8 *pos,
const struct ieee80211_sta_he_cap *he_cap,
u8 *end);
void ieee80211_ie_build_he_6ghz_cap(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
pos = skb_put(skb, ie_len);
- ieee80211_ie_build_he_cap(pos, he_cap, pos + ie_len);
+ ieee80211_ie_build_he_cap(0, pos, he_cap, pos + ie_len);
return 0;
}
struct sk_buff *skb,
struct ieee80211_supported_band *sband)
{
- u8 *pos;
+ u8 *pos, *pre_he_pos;
const struct ieee80211_sta_he_cap *he_cap = NULL;
struct ieee80211_chanctx_conf *chanctx_conf;
u8 he_cap_size;
he_cap = ieee80211_get_he_iftype_cap(sband,
ieee80211_vif_type_p2p(&sdata->vif));
- if (!he_cap || !reg_cap)
+ if (!he_cap || !chanctx_conf || !reg_cap)
return;
+ /* get a max size estimate */
he_cap_size =
2 + 1 + sizeof(he_cap->he_cap_elem) +
ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
he_cap->he_cap_elem.phy_cap_info);
pos = skb_put(skb, he_cap_size);
- ieee80211_ie_build_he_cap(pos, he_cap, pos + he_cap_size);
+ pre_he_pos = pos;
+ pos = ieee80211_ie_build_he_cap(sdata->u.mgd.flags,
+ pos, he_cap, pos + he_cap_size);
+ /* trim excess if any */
+ skb_trim(skb, skb->len - (pre_he_pos + he_cap_size - pos));
ieee80211_ie_build_he_6ghz_cap(sdata, skb);
}
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
const struct ieee80211_sband_iftype_data *iftd;
struct ieee80211_prep_tx_info info = {};
+ int ret;
/* we know it's writable, cast away the const */
if (assoc_data->ie_len)
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
- return;
+ return -EINVAL;
}
chan = chanctx_conf->def.chan;
rcu_read_unlock();
(iftd ? iftd->vendor_elems.len : 0),
GFP_KERNEL);
if (!skb)
- return;
+ return -ENOMEM;
skb_reserve(skb, local->hw.extra_tx_headroom);
skb_put_data(skb, assoc_data->ie + offset, noffset - offset);
}
- if (assoc_data->fils_kek_len &&
- fils_encrypt_assoc_req(skb, assoc_data) < 0) {
- dev_kfree_skb(skb);
- return;
+ if (assoc_data->fils_kek_len) {
+ ret = fils_encrypt_assoc_req(skb, assoc_data);
+ if (ret < 0) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
}
pos = skb_tail_pointer(skb);
kfree(ifmgd->assoc_req_ies);
ifmgd->assoc_req_ies = kmemdup(ie_start, pos - ie_start, GFP_ATOMIC);
+ if (!ifmgd->assoc_req_ies) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
ifmgd->assoc_req_ies_len = pos - ie_start;
drv_mgd_prepare_tx(local, sdata, &info);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS |
IEEE80211_TX_INTFL_MLME_CONN_TX;
ieee80211_tx_skb(sdata, skb);
+
+ return 0;
}
void ieee80211_send_pspoll(struct ieee80211_local *local,
{
struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
struct ieee80211_local *local = sdata->local;
+ int ret;
sdata_assert_lock(sdata);
sdata_info(sdata, "associate with %pM (try %d/%d)\n",
assoc_data->bss->bssid, assoc_data->tries,
IEEE80211_ASSOC_MAX_TRIES);
- ieee80211_send_assoc(sdata);
+ ret = ieee80211_send_assoc(sdata);
+ if (ret)
+ return ret;
if (!ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
rcu_read_unlock();
}
-static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_bss *cbss)
+static u8 ieee80211_max_rx_chains(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_bss *cbss)
{
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const struct element *ht_cap_elem, *vht_cap_elem;
+ const struct cfg80211_bss_ies *ies;
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
+ const struct ieee80211_he_cap_elem *he_cap;
+ const struct element *he_cap_elem;
+ u16 mcs_80_map, mcs_160_map;
+ int i, mcs_nss_size;
+ bool support_160;
u8 chains = 1;
if (ifmgd->flags & IEEE80211_STA_DISABLE_HT)
chains = max(chains, nss);
}
+ if (ifmgd->flags & IEEE80211_STA_DISABLE_HE)
+ return chains;
+
+ ies = rcu_dereference(cbss->ies);
+ he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
+ ies->data, ies->len);
+
+ if (!he_cap_elem || he_cap_elem->datalen < sizeof(*he_cap))
+ return chains;
+
+ /* skip one byte ext_tag_id */
+ he_cap = (void *)(he_cap_elem->data + 1);
+ mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+
+ /* invalid HE IE */
+ if (he_cap_elem->datalen < 1 + mcs_nss_size + sizeof(*he_cap))
+ return chains;
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = mcs_80_map >> (2 * i) & 3;
+
+ if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ chains = max_t(u8, chains, i + 1);
+ break;
+ }
+ }
+
+ support_160 = he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (!support_160)
+ return chains;
+
+ mcs_160_map = le16_to_cpu(he_mcs_nss_supp->tx_mcs_160);
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = mcs_160_map >> (2 * i) & 3;
+
+ if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ chains = max_t(u8, chains, i + 1);
+ break;
+ }
+ }
+
return chains;
}
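The HE MCS/NSS maps parsed above pack one 2-bit capability per spatial stream into a 16-bit word, where the value 3 means "not supported"; scanning from stream 8 downward yields the peer's maximum NSS. The decode is just shifts and masks; a self-contained version (illustrative):

#include <stdint.h>

#define MCS_NOT_SUPPORTED 3	/* 2-bit field value meaning "no support" */

/* Return the highest supported NSS encoded in a 16-bit HE MCS map
 * (streams 1..8, two bits each, stream 1 in the low-order bits). */
static int mcs_map_max_nss(uint16_t mcs_map)
{
	for (int i = 7; i >= 0; i--)
		if (((mcs_map >> (2 * i)) & 3) != MCS_NOT_SUPPORTED)
			return i + 1;
	return 0;
}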
+static bool
+ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_bss_ies *ies,
+ const struct ieee80211_he_operation *he_op)
+{
+ const struct element *he_cap_elem;
+ const struct ieee80211_he_cap_elem *he_cap;
+ struct ieee80211_he_mcs_nss_supp *he_mcs_nss_supp;
+ u16 mcs_80_map_tx, mcs_80_map_rx;
+ u16 ap_min_req_set;
+ int mcs_nss_size;
+ int nss;
+
+ he_cap_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_CAPABILITY,
+ ies->data, ies->len);
+
+ /* invalid HE IE */
+ if (!he_cap_elem || he_cap_elem->datalen < 1 + sizeof(*he_cap)) {
+ sdata_info(sdata,
+			   "Invalid HE elem, disable HE\n");
+ return false;
+ }
+
+ /* skip one byte ext_tag_id */
+ he_cap = (void *)(he_cap_elem->data + 1);
+ mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
+
+ /* invalid HE IE */
+ if (he_cap_elem->datalen < 1 + sizeof(*he_cap) + mcs_nss_size) {
+ sdata_info(sdata,
+			   "Invalid HE elem with nss size, disable HE\n");
+ return false;
+ }
+
+ /* mcs_nss is right after he_cap info */
+ he_mcs_nss_supp = (void *)(he_cap + 1);
+
+ mcs_80_map_tx = le16_to_cpu(he_mcs_nss_supp->tx_mcs_80);
+ mcs_80_map_rx = le16_to_cpu(he_mcs_nss_supp->rx_mcs_80);
+
+ /* P802.11-REVme/D0.3
+ * 27.1.1 Introduction to the HE PHY
+ * ...
+ * An HE STA shall support the following features:
+ * ...
+ * Single spatial stream HE-MCSs 0 to 7 (transmit and receive) in all
+ * supported channel widths for HE SU PPDUs
+ */
+ if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ sdata_info(sdata,
+ "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
+			   mcs_80_map_rx, mcs_80_map_tx);
+ return false;
+ }
+
+ if (!he_op)
+ return true;
+
+ ap_min_req_set = le16_to_cpu(he_op->he_mcs_nss_set);
+
+ /* make sure the AP is consistent with itself
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+ * A STA that is operating in an HE BSS shall be able to receive and
+ * transmit at each of the <HE-MCS, NSS> tuple values indicated by the
+ * Basic HE-MCS And NSS Set field of the HE Operation parameter of the
+ * MLME-START.request primitive and shall be able to receive at each of
+ * the <HE-MCS, NSS> tuple values indicated by the Supported HE-MCS and
+	 * NSS Set field in the HE Capabilities parameter of the MLME-START.request
+ * primitive
+ */
+ for (nss = 8; nss > 0; nss--) {
+ u8 ap_op_val = (ap_min_req_set >> (2 * (nss - 1))) & 3;
+ u8 ap_rx_val;
+ u8 ap_tx_val;
+
+ if (ap_op_val == IEEE80211_HE_MCS_NOT_SUPPORTED)
+ continue;
+
+ ap_rx_val = (mcs_80_map_rx >> (2 * (nss - 1))) & 3;
+ ap_tx_val = (mcs_80_map_tx >> (2 * (nss - 1))) & 3;
+
+ if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
+ sdata_info(sdata,
+ "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
+				   nss, ap_rx_val, ap_tx_val, ap_op_val);
+ return false;
+ }
+ }
+
+ return true;
+}
+
static bool
ieee80211_verify_sta_he_mcs_support(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
/*
* Make sure the HE AP doesn't require MCSs that aren't
- * supported by the client
+ * supported by the client as required by spec
+ *
+ * P802.11-REVme/D0.3
+ * 26.17.1 Basic HE BSS operation
+ *
+	 * An HE STA shall not attempt to join (MLME-JOIN.request primitive)
+ * a BSS, unless it supports (i.e., is able to both transmit and
+ * receive using) all of the <HE-MCS, NSS> tuples in the basic
+ * HE-MCS and NSS set.
*/
if (sta_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
sta_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
}
}
- if (!ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
+ if (!ieee80211_verify_peer_he_mcs_support(sdata, ies, he_oper) ||
+ !ieee80211_verify_sta_he_mcs_support(sdata, sband, he_oper))
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
s1g_oper,
&chandef, false);
- sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
+ sdata->needed_rx_chains = min(ieee80211_max_rx_chains(sdata, cbss),
local->rx_chains);
rcu_read_unlock();
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2019-2020 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*/
#include <linux/netdevice.h>
#include <linux/types.h>
/* do the header conversion - first grab the addresses */
ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
+ skb_postpull_rcsum(skb, skb->data + snap_offs,
+ sizeof(rfc1042_header) + 2);
/* remove the SNAP but leave the ethertype */
skb_pull(skb, snap_offs + sizeof(rfc1042_header));
/* push the addresses in front */
goto free;
sta->mesh->plink_sta = sta;
spin_lock_init(&sta->mesh->plink_lock);
- if (ieee80211_vif_is_mesh(&sdata->vif) &&
- !sdata->u.mesh.user_mpm)
+ if (!sdata->u.mesh.user_mpm)
timer_setup(&sta->mesh->plink_timer, mesh_plink_timer,
0);
sta->mesh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright 2021-2022 Intel Corporation
*/
#include <linux/export.h>
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ bool is_valid_ack_signal =
+ !!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
- info->status.is_valid_ack_signal,
+ is_valid_ack_signal,
GFP_ATOMIC);
else if (ieee80211_is_mgmt(hdr->frame_control))
cfg80211_mgmt_tx_status(&sdata->wdev, cookie,
*/
#define STA_LOST_PKT_THRESHOLD 50
#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */
-#define STA_LOST_TDLS_PKT_THRESHOLD 10
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta,
}
/*
- * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
+ * If we're in TDLS mode, make sure that all STA_LOST_PKT_THRESHOLD
* of the last packets were lost, and that no ACK was received in the
* last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
* mechanism.
struct ieee80211_supported_band *sband;
struct sta_info *sta = NULL;
int rates_idx, retry_count;
- bool acked, noack_success;
+ bool acked, noack_success, ack_signal_valid;
u16 tx_time_est;
if (pubsta) {
acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
+ ack_signal_valid =
+ !!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
if (pubsta) {
struct ieee80211_sub_if_data *sdata = sta->sdata;
unlikely(sdata->u.mgd.probe_send_count > 0))
sdata->u.mgd.probe_send_count = 0;
- if (info->status.is_valid_ack_signal) {
+ if (ack_signal_valid) {
sta->status_stats.last_ack_signal =
(s8)info->status.ack_signal;
sta->status_stats.ack_signal_filled = true;
if (he_cap &&
cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
IEEE80211_CHAN_NO_HE)) {
- pos = ieee80211_ie_build_he_cap(pos, he_cap, end);
+ pos = ieee80211_ie_build_he_cap(0, pos, he_cap, end);
if (!pos)
goto out_err;
}
he_cap->he_cap_elem.phy_cap_info);
}
-u8 *ieee80211_ie_build_he_cap(u8 *pos,
+u8 *ieee80211_ie_build_he_cap(u32 disable_flags, u8 *pos,
const struct ieee80211_sta_he_cap *he_cap,
u8 *end)
{
+ struct ieee80211_he_cap_elem elem;
u8 n;
u8 ie_len;
u8 *orig_pos = pos;
if (!he_cap)
return orig_pos;
- n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem);
+ /* modify on stack first to calculate 'n' and 'ie_len' correctly */
+ elem = he_cap->he_cap_elem;
+
+ if (disable_flags & IEEE80211_STA_DISABLE_40MHZ)
+ elem.phy_cap_info[0] &=
+ ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G);
+
+ if (disable_flags & IEEE80211_STA_DISABLE_160MHZ)
+ elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ if (disable_flags & IEEE80211_STA_DISABLE_80P80MHZ)
+ elem.phy_cap_info[0] &=
+ ~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+
+ n = ieee80211_he_mcs_nss_size(&elem);
ie_len = 2 + 1 +
sizeof(he_cap->he_cap_elem) + n +
ieee80211_he_ppe_size(he_cap->ppe_thres[0],
*pos++ = WLAN_EID_EXT_HE_CAPABILITY;
/* Fixed data */
- memcpy(pos, &he_cap->he_cap_elem, sizeof(he_cap->he_cap_elem));
- pos += sizeof(he_cap->he_cap_elem);
+ memcpy(pos, &elem, sizeof(elem));
+ pos += sizeof(elem);
memcpy(pos, &he_cap->he_mcs_nss_supp, n);
pos += n;
channel_type = NL80211_CHAN_HT40MINUS;
break;
default:
- channel_type = NL80211_CHAN_NO_HT;
return false;
}
for (i = 7; i >= 0; i--) {
u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
- if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_160 = i + 1;
break;
}
for (i = 7; i >= 0; i--) {
u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
- if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
rx_mcs_80 = i + 1;
break;
}
* this function.
*/
rc = mctp_key_add(key, msk);
- if (rc)
+ if (rc) {
kfree(key);
+ } else {
+ trace_mctp_key_acquire(key);
- trace_mctp_key_acquire(key);
-
- /* we don't need to release key->lock on exit */
- mctp_key_unref(key);
+ /* we don't need to release key->lock on exit */
+ mctp_key_unref(key);
+ }
key = NULL;
} else {
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct mpls_dev *mdev;
unsigned int flags;
+ int err;
if (event == NETDEV_REGISTER) {
mdev = mpls_add_dev(dev);
return NOTIFY_OK;
switch (event) {
- int err;
case NETDEV_DOWN:
err = mpls_ifdown(dev, event);
}
}
-void mptcp_get_options(const struct sock *sk,
- const struct sk_buff *skb,
+void mptcp_get_options(const struct sk_buff *skb,
struct mptcp_options_received *mp_opt)
{
const struct tcphdr *th = tcp_hdr(skb);
bool drop_other_suboptions = false;
unsigned int opt_size = *size;
bool echo;
- bool port;
int len;
/* add addr will strip the existing options, be sure to avoid breaking
if (!mptcp_pm_should_add_signal(msk) ||
(opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) ||
!mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr,
- &echo, &port, &drop_other_suboptions))
+ &echo, &drop_other_suboptions))
return false;
if (drop_other_suboptions)
remaining += opt_size;
- len = mptcp_add_addr_len(opts->addr.family, echo, port);
+ len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
if (remaining < len)
return false;
&mp_opt->addr);
pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
- msk, (unsigned long long)hmac,
- (unsigned long long)mp_opt->ahmac);
+ msk, hmac, mp_opt->ahmac);
return hmac == mp_opt->ahmac;
}
return true;
}
- mptcp_get_options(sk, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
/* The subflow can be in close state only if check_fully_established()
* just sent a reset. If so, tell the caller to ignore the current packet.
}
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr)
+ const struct mptcp_addr_info *addr)
{
struct mptcp_pm_data *pm = &msk->pm;
/* path manager helpers */
-bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
unsigned int opt_size, unsigned int remaining,
struct mptcp_addr_info *addr, bool *echo,
- bool *port, bool *drop_other_suboptions)
+ bool *drop_other_suboptions)
{
int ret = false;
u8 add_addr;
u8 family;
+ bool port;
spin_lock_bh(&msk->pm.lock);
}
*echo = mptcp_pm_should_add_signal_echo(msk);
- *port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);
+ port = !!(*echo ? msk->pm.remote.port : msk->pm.local.port);
family = *echo ? msk->pm.remote.family : msk->pm.local.family;
- if (remaining < mptcp_add_addr_len(family, *echo, *port))
+ if (remaining < mptcp_add_addr_len(family, *echo, port))
goto out_unlock;
if (*echo) {
}
static bool lookup_subflow_by_saddr(const struct list_head *list,
- struct mptcp_addr_info *saddr)
+ const struct mptcp_addr_info *saddr)
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
}
static bool lookup_subflow_by_daddr(const struct list_head *list,
- struct mptcp_addr_info *daddr)
+ const struct mptcp_addr_info *daddr)
{
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info cur;
static struct mptcp_pm_addr_entry *
select_local_address(const struct pm_nl_pernet *pernet,
- struct mptcp_sock *msk)
+ const struct mptcp_sock *msk)
{
+ const struct sock *sk = (const struct sock *)msk;
struct mptcp_pm_addr_entry *entry, *ret = NULL;
- struct sock *sk = (struct sock *)msk;
msk_owned_by_me(msk);
}
static struct mptcp_pm_addr_entry *
-select_signal_address(struct pm_nl_pernet *pernet, struct mptcp_sock *msk)
+select_signal_address(struct pm_nl_pernet *pernet, const struct mptcp_sock *msk)
{
struct mptcp_pm_addr_entry *entry, *ret = NULL;
return ret;
}
-unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk)
+unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk)
{
- struct pm_nl_pernet *pernet;
+ const struct pm_nl_pernet *pernet;
- pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+ pernet = net_generic(sock_net((const struct sock *)msk), pm_nl_pernet_id);
return READ_ONCE(pernet->add_addr_signal_max);
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_signal_max);
-unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk)
+unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet;
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_add_addr_accept_max);
-unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk)
+unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet;
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_subflows_max);
-unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk)
+unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk)
{
struct pm_nl_pernet *pernet;
}
struct mptcp_pm_add_entry *
-mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr)
+mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr)
{
struct mptcp_pm_add_entry *entry;
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr, bool check_id)
+ const struct mptcp_addr_info *addr, bool check_id)
{
struct mptcp_pm_add_entry *entry;
struct sock *sk = (struct sock *)msk;
}
static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
- struct mptcp_pm_addr_entry *entry)
+ const struct mptcp_pm_addr_entry *entry)
{
struct mptcp_pm_add_entry *add_entry = NULL;
struct sock *sk = (struct sock *)msk;
}
}
-static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr,
- struct mptcp_addr_info *addr)
+static bool lookup_address_in_vec(const struct mptcp_addr_info *addrs, unsigned int nr,
+ const struct mptcp_addr_info *addr)
{
int i;
}
static int
-lookup_id_by_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
+lookup_id_by_addr(const struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
{
- struct mptcp_pm_addr_entry *entry;
+ const struct mptcp_pm_addr_entry *entry;
int ret = -1;
rcu_read_lock();
static int mptcp_pm_nl_create_listen_socket(struct sock *sk,
struct mptcp_pm_addr_entry *entry)
{
+ int addrlen = sizeof(struct sockaddr_in);
struct sockaddr_storage addr;
struct mptcp_sock *msk;
struct socket *ssock;
}
mptcp_info2sockaddr(&entry->addr, &addr, entry->addr.family);
- err = kernel_bind(ssock, (struct sockaddr *)&addr,
- sizeof(struct sockaddr_in));
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+ if (entry->addr.family == AF_INET6)
+ addrlen = sizeof(struct sockaddr_in6);
+#endif
+ err = kernel_bind(ssock, (struct sockaddr *)&addr, addrlen);
if (err) {
pr_warn("kernel_bind error, err=%d", err);
goto out;
}
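The bind fix above exists because sizeof(struct sockaddr_in) is too short to carry an IPv6 address, so the length passed alongside a sockaddr_storage must follow the address family. A userspace analogue of the same rule, as a hedged sketch:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    static int bind_any(int fd, int family, unsigned short port)
    {
        struct sockaddr_storage ss;
        socklen_t len;

        memset(&ss, 0, sizeof(ss));
        if (family == AF_INET6) {
            struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)&ss;

            a6->sin6_family = AF_INET6;
            a6->sin6_port = htons(port);
            len = sizeof(*a6);    /* 28 bytes, not 16 */
        } else {
            struct sockaddr_in *a4 = (struct sockaddr_in *)&ss;

            a4->sin_family = AF_INET;
            a4->sin_port = htons(port);
            len = sizeof(*a4);
        }
        return bind(fd, (struct sockaddr *)&ss, len);
    }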
static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr)
+ const struct mptcp_addr_info *addr)
{
struct mptcp_pm_add_entry *entry;
}
static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr,
+ const struct mptcp_addr_info *addr,
bool force)
{
struct mptcp_rm_list list = { .nr = 0 };
struct sock *tcp_sock; /* tcp sk backpointer */
struct sock *conn; /* parent mptcp_sock */
const struct inet_connection_sock_af_ops *icsk_af_ops;
- void (*tcp_data_ready)(struct sock *sk);
void (*tcp_state_change)(struct sock *sk);
- void (*tcp_write_space)(struct sock *sk);
void (*tcp_error_report)(struct sock *sk);
struct rcu_head rcu;
static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
struct mptcp_subflow_context *ctx)
{
- sk->sk_data_ready = ctx->tcp_data_ready;
+ sk->sk_data_ready = sock_def_readable;
sk->sk_state_change = ctx->tcp_state_change;
- sk->sk_write_space = ctx->tcp_write_space;
+ sk->sk_write_space = sk_stream_write_space;
sk->sk_error_report = ctx->tcp_error_report;
inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
struct sock *mptcp_sk_clone(const struct sock *sk,
const struct mptcp_options_received *mp_opt,
struct request_sock *req);
-void mptcp_get_options(const struct sock *sk,
- const struct sk_buff *skb,
+void mptcp_get_options(const struct sk_buff *skb,
struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr);
+ const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk);
void mptcp_pm_rm_addr_received(struct mptcp_sock *msk,
bool mptcp_pm_sport_in_anno_list(struct mptcp_sock *msk, const struct sock *sk);
struct mptcp_pm_add_entry *
mptcp_pm_del_add_timer(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr, bool check_id);
+ const struct mptcp_addr_info *addr, bool check_id);
struct mptcp_pm_add_entry *
-mptcp_lookup_anno_list_by_saddr(struct mptcp_sock *msk,
- struct mptcp_addr_info *addr);
+mptcp_lookup_anno_list_by_saddr(const struct mptcp_sock *msk,
+ const struct mptcp_addr_info *addr);
int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
u8 *flags, int *ifindex);
return TCPOLEN_MPTCP_RM_ADDR_BASE + roundup(rm_list->nr - 1, 4) + 1;
}
-bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, struct sk_buff *skb,
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, const struct sk_buff *skb,
unsigned int opt_size, unsigned int remaining,
struct mptcp_addr_info *addr, bool *echo,
- bool *port, bool *drop_other_suboptions);
+ bool *drop_other_suboptions);
bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
struct mptcp_rm_list *rm_list);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk,
const struct mptcp_rm_list *rm_list);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
-unsigned int mptcp_pm_get_add_addr_signal_max(struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
-unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
+unsigned int mptcp_pm_get_add_addr_signal_max(const struct mptcp_sock *msk);
+unsigned int mptcp_pm_get_add_addr_accept_max(const struct mptcp_sock *msk);
+unsigned int mptcp_pm_get_subflows_max(const struct mptcp_sock *msk);
+unsigned int mptcp_pm_get_local_addr_max(const struct mptcp_sock *msk);
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
case SO_RCVLOWAT:
case SO_RCVTIMEO_OLD:
case SO_RCVTIMEO_NEW:
+ case SO_SNDTIMEO_OLD:
+ case SO_SNDTIMEO_NEW:
case SO_BUSY_POLL:
case SO_PREFER_BUSY_POLL:
case SO_BUSY_POLL_BUDGET:
return -EINVAL;
#endif
- mptcp_get_options(sk_listener, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
int err;
subflow_init_req(req, sk_listener);
- mptcp_get_options(sk_listener, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
opt_mp_capable = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPC);
opt_mp_join = !!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ);
thmac = get_unaligned_be64(hmac);
pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
- subflow, subflow->token,
- (unsigned long long)thmac,
- (unsigned long long)subflow->thmac);
+ subflow, subflow->token, thmac, subflow->thmac);
return thmac == subflow->thmac;
}
subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
- mptcp_get_options(sk, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
if (subflow->request_mptcp) {
if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
MPTCP_INC_STATS(sock_net(sk),
}
struct request_sock_ops mptcp_subflow_request_sock_ops;
-EXPORT_SYMBOL_GPL(mptcp_subflow_request_sock_ops);
-static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops;
+static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops;
-static struct inet_connection_sock_af_ops subflow_v6_specific;
-static struct inet_connection_sock_af_ops subflow_v6m_specific;
+static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
+static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
+static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
static struct proto tcpv6_prot_override;
static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
* reordered MPC will cause fallback, but we don't have other
* options.
*/
- mptcp_get_options(sk, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPC)) {
fallback = true;
goto create_child;
if (!new_msk)
fallback = true;
} else if (subflow_req->mp_join) {
- mptcp_get_options(sk, skb, &mp_opt);
+ mptcp_get_options(skb, &mp_opt);
if (!(mp_opt.suboptions & OPTIONS_MPTCP_MPJ) ||
!subflow_hmac_valid(req, &mp_opt) ||
!mptcp_can_accept_new_subflow(subflow_req->msk)) {
return child;
}
-static struct inet_connection_sock_af_ops subflow_specific;
+static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
static struct proto tcp_prot_override;
enum mapping_status {
mptcp_write_space(sk);
}
-static struct inet_connection_sock_af_ops *
+static const struct inet_connection_sock_af_ops *
subflow_default_af_ops(struct sock *sk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
- struct inet_connection_sock_af_ops *target;
+ const struct inet_connection_sock_af_ops *target;
target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
tp->is_mptcp = 1;
ctx->icsk_af_ops = icsk->icsk_af_ops;
icsk->icsk_af_ops = subflow_default_af_ops(sk);
- ctx->tcp_data_ready = sk->sk_data_ready;
ctx->tcp_state_change = sk->sk_state_change;
- ctx->tcp_write_space = sk->sk_write_space;
ctx->tcp_error_report = sk->sk_error_report;
+
+ WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
+ WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);
+
sk->sk_data_ready = subflow_data_ready;
sk->sk_write_space = subflow_write_space;
sk->sk_state_change = subflow_state_change;
new_ctx->conn_finished = 1;
new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
- new_ctx->tcp_data_ready = old_ctx->tcp_data_ready;
new_ctx->tcp_state_change = old_ctx->tcp_state_change;
- new_ctx->tcp_write_space = old_ctx->tcp_write_space;
new_ctx->tcp_error_report = old_ctx->tcp_error_report;
new_ctx->rel_write_seq = 1;
new_ctx->tcp_sock = newsk;
if (helper->from_nlattr)
helper->from_nlattr(helpinfo, ct);
- /* not in hash table yet so not strictly necessary */
+ /* disable helper auto-assignment for this entry */
+ ct->status |= IPS_HELPER;
RCU_INIT_POINTER(help->helper, helper);
}
} else {
pr_debug("Setting vtag %x for dir %d\n",
ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
+
+ /* don't renew timeout on init retransmit so
+ * port reuse by client or NAT middlebox cannot
+ * keep entry alive indefinitely (incl. nat info).
+ */
+ if (new_state == SCTP_CONNTRACK_CLOSED &&
+ old_state == SCTP_CONNTRACK_CLOSED &&
+ nf_ct_is_confirmed(ct))
+ ignore = true;
}
ct->proto.sctp.state = new_state;
}
}
+static void tcp_init_sender(struct ip_ct_tcp_state *sender,
+ struct ip_ct_tcp_state *receiver,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct tcphdr *tcph,
+ u32 end, u32 win)
+{
+ /* SYN-ACK in reply to a SYN
+ * or SYN from reply direction in simultaneous open.
+ */
+ sender->td_end =
+ sender->td_maxend = end;
+ sender->td_maxwin = (win == 0 ? 1 : win);
+
+ tcp_options(skb, dataoff, tcph, sender);
+ /* RFC 1323:
+ * Both sides must send the Window Scale option
+ * to enable window scaling in either direction.
+ */
+ if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE &&
+ receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE)) {
+ sender->td_scale = 0;
+ receiver->td_scale = 0;
+ }
+}
+
static bool tcp_in_window(struct nf_conn *ct,
enum ip_conntrack_dir dir,
unsigned int index,
* Initialize sender data.
*/
if (tcph->syn) {
- /*
- * SYN-ACK in reply to a SYN
- * or SYN from reply direction in simultaneous open.
- */
- sender->td_end =
- sender->td_maxend = end;
- sender->td_maxwin = (win == 0 ? 1 : win);
-
- tcp_options(skb, dataoff, tcph, sender);
- /*
- * RFC 1323:
- * Both sides must send the Window Scale option
- * to enable window scaling in either direction.
- */
- if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
- && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
- sender->td_scale =
- receiver->td_scale = 0;
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+ end, win);
if (!tcph->ack)
/* Simultaneous open */
return true;
sender->td_maxwin = (win == 0 ? 1 : win);
tcp_options(skb, dataoff, tcph, sender);
+ } else if (tcph->syn && dir == IP_CT_DIR_REPLY &&
+ state->state == TCP_CONNTRACK_SYN_SENT) {
+ /* Retransmitted syn-ack, or syn (simultaneous open).
+ *
+ * Re-init state for this direction, just like for the first
+ * syn(-ack) reply, it might differ in seq, ack or tcp options.
+ */
+ tcp_init_sender(sender, receiver,
+ skb, dataoff, tcph,
+ end, win);
+ if (!tcph->ack)
+ return true;
}
if (!(tcph->ack)) {
{
struct tcphdr *tcph;
- if (pkt->tprot != IPPROTO_TCP)
+ if (pkt->tprot != IPPROTO_TCP || pkt->fragoff)
return NULL;
tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
{
unsigned int thoff = nft_thoff(pkt);
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
return -1;
switch (pkt->tprot) {
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
offset = skb_network_offset(skb);
break;
case NFT_PAYLOAD_TRANSPORT_HEADER:
- if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
+ if (!(pkt->flags & NFT_PKTINFO_L4PROTO) || pkt->fragoff)
goto err;
offset = nft_thoff(pkt);
break;
if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
pkt->tprot == IPPROTO_SCTP &&
skb->ip_summed != CHECKSUM_PARTIAL) {
- if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
+ if (pkt->fragoff == 0 &&
+ nft_payload_csum_sctp(skb, nft_thoff(pkt)))
goto err;
}
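The hunks above all add the same guard: the transport header offset is only meaningful for the first fragment of a datagram, so any read or checksum update at nft_thoff() must bail out when pkt->fragoff is non-zero. The underlying IPv4 rule, reduced to a sketch (field layout per RFC 791, not a kernel helper):

    #include <linux/ip.h>

    /* Only the first fragment carries the L4 header: the low 13 bits
     * of frag_off are the fragment offset in 8-byte units.
     */
    static bool l4_header_present(const struct iphdr *iph)
    {
        return (ntohs(iph->frag_off) & 0x1fff) == 0;
    }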
if (err)
goto nf_ct_failure;
err = nf_synproxy_ipv6_init(snet, ctx->net);
- if (err)
+ if (err) {
+ nf_synproxy_ipv4_fini(snet, ctx->net);
goto nf_ct_failure;
+ }
break;
}
if (par->family == NFPROTO_IPV4)
nf_defrag_ipv4_disable(par->net);
else if (par->family == NFPROTO_IPV6)
- nf_defrag_ipv4_disable(par->net);
+ nf_defrag_ipv6_disable(par->net);
}
static struct xt_match socket_mt_reg[] __read_mostly = {
restart_act_graph:
for (i = 0; i < nr_actions; i++) {
const struct tc_action *a = actions[i];
+ int repeat_ttl;
if (jmp_prgcnt > 0) {
jmp_prgcnt -= 1;
if (tc_act_skip_sw(a->tcfa_flags))
continue;
+
+ repeat_ttl = 32;
repeat:
ret = a->ops->act(skb, a, res);
- if (ret == TC_ACT_REPEAT)
- goto repeat; /* we need a ttl - JHS */
-
+ if (unlikely(ret == TC_ACT_REPEAT)) {
+ if (--repeat_ttl != 0)
+ goto repeat;
+ /* suspicious opcode, stop pipeline */
+ net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
+ return TC_ACT_OK;
+ }
if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
return err;
}
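The act_api change converts an unbounded "goto repeat" into a retry loop with a ttl, so an action that keeps returning TC_ACT_REPEAT can no longer livelock the softirq path. The general shape of that defensive pattern, with illustrative names:

    enum { ACT_OK, ACT_REPEAT };

    static int run_action(int (*act)(void *ctx), void *ctx)
    {
        int ttl = 32;    /* same budget the patch picks */
        int ret;

        do {
            ret = act(ctx);
        } while (ret == ACT_REPEAT && --ttl > 0);

        /* budget exhausted: treat the pipeline as done */
        return ttl > 0 ? ret : ACT_OK;
    }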
+static bool tcf_police_mtu_check(struct sk_buff *skb, u32 limit)
+{
+ u32 len;
+
+ if (skb_is_gso(skb))
+ return skb_gso_validate_mac_len(skb, limit);
+
+ len = qdisc_pkt_len(skb);
+ if (skb_at_tc_ingress(skb))
+ len += skb->mac_len;
+
+ return len <= limit;
+}
+
static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
goto inc_overlimits;
}
- if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
+ if (tcf_police_mtu_check(skb, p->tcfp_mtu)) {
if (!p->rate_present && !p->pps_present) {
ret = p->tcfp_result;
goto end;
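tcf_police_mtu_check() fixes two blind spots in the old qdisc_pkt_len(skb) <= mtu test: GSO super-packets must be judged by their largest resulting segment (which skb_gso_validate_mac_len() checks), and at ingress the MAC header is not yet counted in qdisc_pkt_len(). The decision table, stripped of the skb plumbing, as an illustrative sketch:

    static bool mtu_ok(bool is_gso, bool gso_segs_fit,
                       unsigned int pkt_len, unsigned int mac_len,
                       bool at_ingress, unsigned int limit)
    {
        if (is_gso)
            return gso_segs_fit;    /* per-segment MAC length check */

        if (at_ingress)
            pkt_len += mac_len;     /* not in qdisc_pkt_len() here */

        return pkt_len <= limit;
    }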
/* Find qdisc */
if (!*parent) {
- *q = dev->qdisc;
+ *q = rcu_dereference(dev->qdisc);
*parent = (*q)->handle;
} else {
*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
parent = tcm->tcm_parent;
if (!parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
return skb->len;
if (!tcm->tcm_parent)
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rtnl_dereference(dev->qdisc), handle);
if (q)
goto out;
if (!handle)
return NULL;
- q = qdisc_match_from_root(dev->qdisc, handle);
+ q = qdisc_match_from_root(rcu_dereference(dev->qdisc), handle);
if (q)
goto out;
skip:
if (!ingress) {
notify_and_destroy(net, skb, n, classid,
- dev->qdisc, new);
+ rtnl_dereference(dev->qdisc), new);
if (new && !new->ops->attach)
qdisc_refcount_inc(new);
- dev->qdisc = new ? : &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
if (new && new->ops->attach)
new->ops->attach(new);
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
if (!q) {
NL_SET_ERR_MSG(extack, "Cannot find specified qdisc on specified device");
q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
- q = dev->qdisc;
+ q = rtnl_dereference(dev->qdisc);
}
/* It may be default qdisc, ignore it */
s_q_idx = 0;
q_idx = 0;
- if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
+ if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
+ skb, cb, &q_idx, s_q_idx,
true, tca[TCA_DUMP_INVISIBLE]) < 0)
goto done;
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
/* Now qid is genuine qdisc handle consistent
* both with parent and child.
portid = TC_H_MAKE(qid, portid);
} else {
if (qid == 0)
- qid = dev->qdisc->handle;
+ qid = rtnl_dereference(dev->qdisc)->handle;
}
/* OK. Locate qdisc */
s_t = cb->args[0];
t = 0;
- if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t, true) < 0)
+ if (tc_dump_tclass_root(rtnl_dereference(dev->qdisc),
+ skb, tcm, cb, &t, s_t, true) < 0)
goto done;
dev_queue = dev_ingress_queue(dev);
if (!netif_is_multiqueue(dev) ||
dev->priv_flags & IFF_NO_QUEUE) {
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
} else {
qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
if (qdisc) {
- dev->qdisc = qdisc;
+ rcu_assign_pointer(dev->qdisc, qdisc);
qdisc->ops->attach(qdisc);
}
}
+ qdisc = rtnl_dereference(dev->qdisc);
/* Detect default qdisc setup/init failed and fallback to "noqueue" */
- if (dev->qdisc == &noop_qdisc) {
+ if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
- dev->qdisc = txq->qdisc_sleeping;
- qdisc_refcount_inc(dev->qdisc);
+ qdisc = txq->qdisc_sleeping;
+ rcu_assign_pointer(dev->qdisc, qdisc);
+ qdisc_refcount_inc(qdisc);
dev->priv_flags ^= IFF_NO_QUEUE;
}
#ifdef CONFIG_NET_SCHED
- if (dev->qdisc != &noop_qdisc)
- qdisc_hash_add(dev->qdisc, false);
+ if (qdisc != &noop_qdisc)
+ qdisc_hash_add(qdisc, false);
#endif
}
* and noqueue_qdisc for virtual interfaces
*/
- if (dev->qdisc == &noop_qdisc)
+ if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
void dev_qdisc_change_real_num_tx(struct net_device *dev,
unsigned int new_real_tx)
{
- struct Qdisc *qdisc = dev->qdisc;
+ struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
if (qdisc->ops->change_real_num_tx)
qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
void dev_init_scheduler(struct net_device *dev)
{
- dev->qdisc = &noop_qdisc;
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
if (dev_ingress_queue(dev))
shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
- qdisc_put(dev->qdisc);
- dev->qdisc = &noop_qdisc;
+ qdisc_put(rtnl_dereference(dev->qdisc));
+ rcu_assign_pointer(dev->qdisc, &noop_qdisc);
WARN_ON(timer_pending(&dev->watchdog_timer));
}
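Everything from the qdisc lookup down to dev_shutdown() is one conversion: dev->qdisc becomes an RCU-annotated pointer, so datapath readers use rcu_dereference() under rcu_read_lock(), control-path readers holding RTNL use rtnl_dereference(), and writers publish with rcu_assign_pointer(). A minimal sketch of that three-role discipline (struct cfg and struct holder are illustrative, not net core types):

    #include <linux/rcupdate.h>

    struct cfg {
        int val;
    };

    struct holder {
        struct cfg __rcu *cfg;
    };

    static int reader_fast_path(struct holder *h)
    {
        int v;

        rcu_read_lock();
        v = rcu_dereference(h->cfg)->val;    /* datapath reader */
        rcu_read_unlock();
        return v;
    }

    static void writer_publish(struct holder *h, struct cfg *newc)
    {
        /* assumed: caller holds the writer-side lock (RTNL above) */
        rcu_assign_pointer(h->cfg, newc);
    }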
* creation on client
*/
+static struct workqueue_struct *smc_tcp_ls_wq; /* wq for tcp listen work */
struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
struct workqueue_struct *smc_close_wq; /* wq for close work */
static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
+int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
+ void *hdr;
+
+ if (cb_ctx->pos[0])
+ goto out;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &smc_gen_nl_family, NLM_F_MULTI,
+ SMC_NETLINK_DUMP_HS_LIMITATION);
+ if (!hdr)
+ return -ENOMEM;
+
+ if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED,
+ sock_net(skb->sk)->smc.limit_smc_hs))
+ goto err;
+
+ genlmsg_end(skb, hdr);
+ cb_ctx->pos[0] = 1;
+out:
+ return skb->len;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
+{
+ sock_net(skb->sk)->smc.limit_smc_hs = true;
+ return 0;
+}
+
+int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
+{
+ sock_net(skb->sk)->smc.limit_smc_hs = false;
+ return 0;
+}
+
static void smc_set_keepalive(struct sock *sk, int val)
{
struct smc_sock *smc = smc_sk(sk);
smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
+static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
+ struct sk_buff *skb,
+ struct request_sock *req,
+ struct dst_entry *dst,
+ struct request_sock *req_unhash,
+ bool *own_req)
+{
+ struct smc_sock *smc;
+
+ smc = smc_clcsock_user_data(sk);
+
+ if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
+ sk->sk_max_ack_backlog)
+ goto drop;
+
+ if (sk_acceptq_is_full(&smc->sk)) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+ goto drop;
+ }
+
+ /* passthrough to original syn recv sock fct */
+ return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
+ own_req);
+
+drop:
+ dst_release(dst);
+ tcp_listendrop(sk);
+ return NULL;
+}
+
+static bool smc_hs_congested(const struct sock *sk)
+{
+ const struct smc_sock *smc;
+
+ smc = smc_clcsock_user_data(sk);
+
+ if (!smc)
+ return true;
+
+ if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
+ return true;
+
+ return false;
+}
+
static struct smc_hashinfo smc_v4_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
struct sock *clcsk;
+ int rc = 0;
mutex_lock(&smc->clcsock_release_lock);
if (!smc->clcsock) {
- mutex_unlock(&smc->clcsock_release_lock);
- return -EBADF;
+ rc = -EBADF;
+ goto out;
}
clcsk = smc->clcsock->sk;
+ if (smc->use_fallback)
+ goto out;
smc->use_fallback = true;
smc->fallback_rsn = reason_code;
smc_stat_fallback(smc);
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
}
+out:
mutex_unlock(&smc->clcsock_release_lock);
- return 0;
+ return rc;
}
/* fall back during connect */
/* perform CLC handshake */
rc = smc_connect_clc(smc, aclc2, ini);
- if (rc)
+ if (rc) {
+ /* -EAGAIN on timeout, see tcp_recvmsg() */
+ if (rc == -EAGAIN) {
+ rc = -ETIMEDOUT;
+ smc->sk.sk_err = ETIMEDOUT;
+ }
goto vlan_cleanup;
+ }
/* check if smc modes and versions of CLC proposal and accept match */
rc = smc_connect_check_aclc(ini, aclc);
struct smc_sock *lsmc = new_smc->listen_smc;
struct sock *newsmcsk = &new_smc->sk;
+ if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
+ atomic_dec(&lsmc->queued_smc_hs);
+
if (lsmc->sk.sk_state == SMC_LISTEN) {
lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
smc_accept_enqueue(&lsmc->sk, newsmcsk);
if (!new_smc)
continue;
+ if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
+ atomic_inc(&lsmc->queued_smc_hs);
+
new_smc->listen_smc = lsmc;
new_smc->use_fallback = lsmc->use_fallback;
new_smc->fallback_rsn = lsmc->fallback_rsn;
lsmc->clcsk_data_ready(listen_clcsock);
if (lsmc->sk.sk_state == SMC_LISTEN) {
sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
- if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
+ if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
sock_put(&lsmc->sk);
}
}
smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
+
+ /* save original ops */
+ smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
+
+ smc->af_ops = *smc->ori_af_ops;
+ smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
+
+ inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
+
+ if (smc->limit_smc_hs)
+ tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
+
rc = kernel_listen(smc->clcsock, backlog);
if (rc) {
smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
return rc ? rc : rc1;
}
+static int __smc_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct smc_sock *smc;
+ int val, len;
+
+ smc = smc_sk(sock->sk);
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ len = min_t(int, len, sizeof(int));
+
+ if (len < 0)
+ return -EINVAL;
+
+ switch (optname) {
+ case SMC_LIMIT_HS:
+ val = smc->limit_smc_hs;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int __smc_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct smc_sock *smc;
+ int val, rc;
+
+ smc = smc_sk(sk);
+
+ lock_sock(sk);
+ switch (optname) {
+ case SMC_LIMIT_HS:
+ if (optlen < sizeof(int)) {
+ rc = -EINVAL;
+ break;
+ }
+ if (copy_from_sockptr(&val, optval, sizeof(int))) {
+ rc = -EFAULT;
+ break;
+ }
+
+ smc->limit_smc_hs = !!val;
+ rc = 0;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ release_sock(sk);
+
+ return rc;
+}
+
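With SOL_SMC wired into smc_setsockopt()/smc_getsockopt() below, the limitation can also be toggled per socket from userspace. A hypothetical caller, assuming headers from a kernel that carries this patch (the fallback #defines mirror the uapi values and are only for illustration):

    #include <sys/socket.h>

    #ifndef SOL_SMC
    #define SOL_SMC 286
    #endif
    #ifndef SMC_LIMIT_HS
    #define SMC_LIMIT_HS 1
    #endif

    static int smc_limit_handshake(int fd, int on)
    {
        return setsockopt(fd, SOL_SMC, SMC_LIMIT_HS, &on, sizeof(on));
    }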
static int smc_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int optlen)
{
if (level == SOL_TCP && optname == TCP_ULP)
return -EOPNOTSUPP;
+ else if (level == SOL_SMC)
+ return __smc_setsockopt(sock, level, optname, optval, optlen);
smc = smc_sk(sk);
struct smc_sock *smc;
int rc;
+ if (level == SOL_SMC)
+ return __smc_getsockopt(sock, level, optname, optval, optlen);
+
smc = smc_sk(sock->sk);
mutex_lock(&smc->clcsock_release_lock);
if (!smc->clcsock) {
smc->use_fallback = false; /* assume rdma capability first */
smc->fallback_rsn = 0;
+ /* default behavior from limit_smc_hs in every net namespace */
+ smc->limit_smc_hs = net->smc.limit_smc_hs;
+
rc = 0;
if (!clcsock) {
rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
goto out_nl;
rc = -ENOMEM;
+
+ smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
+ if (!smc_tcp_ls_wq)
+ goto out_pnet;
+
smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
if (!smc_hs_wq)
- goto out_pnet;
+ goto out_alloc_tcp_ls_wq;
smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
if (!smc_close_wq)
destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
destroy_workqueue(smc_hs_wq);
+out_alloc_tcp_ls_wq:
+ destroy_workqueue(smc_tcp_ls_wq);
out_pnet:
smc_pnet_exit();
out_nl:
smc_core_exit();
smc_ib_unregister_client();
destroy_workqueue(smc_close_wq);
+ destroy_workqueue(smc_tcp_ls_wq);
destroy_workqueue(smc_hs_wq);
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
+#include <net/genetlink.h>
#include <net/sock.h>
#include "smc_ib.h"
struct work_struct smc_listen_work;/* prepare new accept socket */
struct list_head accept_q; /* sockets to be accepted */
spinlock_t accept_q_lock; /* protects accept_q */
+ bool limit_smc_hs; /* put constraint on handshake */
bool use_fallback; /* fallback to tcp */
int fallback_rsn; /* reason for fallback */
u32 peer_diagnosis; /* decline reason from peer */
+ atomic_t queued_smc_hs; /* queued smc handshakes */
+ struct inet_connection_sock_af_ops af_ops;
+ const struct inet_connection_sock_af_ops *ori_af_ops;
+ /* original af ops */
int sockopt_defer_accept;
/* sockopt TCP_DEFER_ACCEPT
* value
return (struct smc_sock *)sk;
}
-static inline struct smc_sock *smc_clcsock_user_data(struct sock *clcsk)
+static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
{
return (struct smc_sock *)
((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
struct smc_gidlist *gidlist,
struct smc_ib_device *known_dev, u8 *known_gid);
+/* smc handshake limitation interface for netlink */
+int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
+int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
+
#endif /* __SMC_H */
.flags = GENL_ADMIN_PERM,
.doit = smc_nl_disable_seid,
},
+ {
+ .cmd = SMC_NETLINK_DUMP_HS_LIMITATION,
+ /* can be retrieved by unprivileged users */
+ .dumpit = smc_nl_dump_hs_limitation,
+ },
+ {
+ .cmd = SMC_NETLINK_ENABLE_HS_LIMITATION,
+ .flags = GENL_ADMIN_PERM,
+ .doit = smc_nl_enable_hs_limitation,
+ },
+ {
+ .cmd = SMC_NETLINK_DISABLE_HS_LIMITATION,
+ .flags = GENL_ADMIN_PERM,
+ .doit = smc_nl_disable_hs_limitation,
+ },
};
static const struct nla_policy smc_gen_nl_policy[2] = {
new_pe->type = SMC_PNET_ETH;
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
- new_pe->ndev = ndev;
- if (ndev)
- netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL);
rc = -EEXIST;
new_netdev = true;
write_lock(&pnettable->lock);
}
}
if (new_netdev) {
+ if (ndev) {
+ new_pe->ndev = ndev;
+ netdev_tracker_alloc(ndev, &new_pe->dev_tracker,
+ GFP_ATOMIC);
+ }
list_add_tail(&new_pe->list, &pnettable->pnetlist);
write_unlock(&pnettable->lock);
} else {
smc_pnet_create_pnetids_list(net);
+ /* disable handshake limitation by default */
+ net->smc.limit_smc_hs = 0;
+
return 0;
}
return rc;
}
+/* Wakeup sndbuf consumers from process context
+ * since there is more data to transmit. The caller
+ * must hold sock lock.
+ */
void smc_tx_pending(struct smc_connection *conn)
{
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
}
/* Wakeup sndbuf consumers from process context
- * since there is more data to transmit
+ * since there is more data to transmit in locked
+ * sock.
*/
void smc_tx_work(struct work_struct *work)
{
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is bound.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
* @addr: address holder
*
* Fills the @addr pointer with the address which the socket is connected.
- * Returns 0 or an error code.
+ * Returns the length of the address in bytes or an error code.
*/
int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
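The corrected kernel-doc matters for callers: both helpers have returned the address length since the int *len parameter was removed in 2018, so "0 on success" was wrong. An in-kernel caller sketch that treats the return value correctly:

    #include <linux/net.h>
    #include <linux/socket.h>

    static int peek_local_addr(struct socket *sock)
    {
        struct sockaddr_storage ss;
        int len;

        len = kernel_getsockname(sock, (struct sockaddr *)&ss);
        if (len < 0)
            return len;    /* negative error code */

        /* exactly 'len' bytes of ss are valid here */
        return 0;
    }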
}
sock = container_of(xprt, struct sock_xprt, xprt);
- if (kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
+ mutex_lock(&sock->recv_mutex);
+ if (sock->sock == NULL ||
+ kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
goto out;
ret = sprintf(buf, "%pISc\n", &saddr);
out:
+ mutex_unlock(&sock->recv_mutex);
xprt_put(xprt);
return ret + 1;
}
IB_POLL_WORKQUEUE);
if (IS_ERR(ep->re_attr.send_cq)) {
rc = PTR_ERR(ep->re_attr.send_cq);
+ ep->re_attr.send_cq = NULL;
goto out_destroy;
}
IB_POLL_WORKQUEUE);
if (IS_ERR(ep->re_attr.recv_cq)) {
rc = PTR_ERR(ep->re_attr.recv_cq);
+ ep->re_attr.recv_cq = NULL;
goto out_destroy;
}
ep->re_receive_count = 0;
ep->re_pd = ib_alloc_pd(device, 0);
if (IS_ERR(ep->re_pd)) {
rc = PTR_ERR(ep->re_pd);
+ ep->re_pd = NULL;
goto out_destroy;
}
unsigned short get_srcport(struct rpc_xprt *xprt)
{
struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
- return xs_sock_getport(sock->sock);
+ unsigned short ret = 0;
+ mutex_lock(&sock->recv_mutex);
+ if (sock->sock)
+ ret = xs_sock_getport(sock->sock);
+ mutex_unlock(&sock->recv_mutex);
+ return ret;
}
EXPORT_SYMBOL(get_srcport);
return 0;
}
+static struct net_device *
+switchdev_lower_dev_find_rcu(struct net_device *dev,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev))
+{
+ struct switchdev_nested_priv switchdev_priv = {
+ .check_cb = check_cb,
+ .foreign_dev_check_cb = foreign_dev_check_cb,
+ .dev = dev,
+ .lower_dev = NULL,
+ };
+ struct netdev_nested_priv priv = {
+ .data = &switchdev_priv,
+ };
+
+ netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);
+
+ return switchdev_priv.lower_dev;
+}
+
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
bool (*check_cb)(const struct net_device *dev),
.data = &switchdev_priv,
};
- netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);
+ netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);
return switchdev_priv.lower_dev;
}
return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
if (netif_is_lag_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
+ if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
goto maybe_bridged_with_us;
/* This is a LAG interface that we offload */
* towards a bridge device.
*/
if (netif_is_bridge_master(dev)) {
- if (!switchdev_lower_dev_find(dev, check_cb, foreign_dev_check_cb))
+ if (!switchdev_lower_dev_find_rcu(dev, check_cb, foreign_dev_check_cb))
return 0;
/* This is a bridge interface that we offload */
* that we offload.
*/
if (!check_cb(lower_dev) &&
- !switchdev_lower_dev_find(lower_dev, check_cb,
- foreign_dev_check_cb))
+ !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
+ foreign_dev_check_cb))
continue;
err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
if (!br || !netif_is_bridge_master(br))
return 0;
- if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
+ if (!switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb))
return 0;
return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
static int __switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
int (*add_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj,
struct netlink_ext_ack *extack))
{
struct switchdev_notifier_info *info = &port_obj_info->info;
+ struct net_device *br, *lower_dev;
struct netlink_ext_ack *extack;
- struct net_device *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
if (netif_is_bridge_master(lower_dev))
continue;
+ /* When searching for switchdev interfaces that are neighbors
+ * of foreign ones, and @dev is a bridge, do not recurse on the
+ * foreign interface again, it was already visited.
+ */
+ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
+ !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
+ continue;
+
err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
- check_cb, add_cb);
+ check_cb, foreign_dev_check_cb,
+ add_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
- return err;
+ /* Event is neither on a bridge nor a LAG. Check whether it is on an
+ * interface that is in a bridge with us.
+ */
+ if (!foreign_dev_check_cb)
+ return err;
+
+ br = netdev_master_upper_dev_get(dev);
+ if (!br || !netif_is_bridge_master(br))
+ return err;
+
+ if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
+ return err;
+
+ return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
+ foreign_dev_check_cb, add_cb);
}
+/* Pass through a port object addition, if @dev passes @check_cb, or replicate
+ * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
+ * bridge or a LAG.
+ */
int switchdev_handle_port_obj_add(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
int err;
err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
- add_cb);
+ NULL, add_cb);
if (err == -EOPNOTSUPP)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
+/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
+ * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
+ * that pass @check_cb and are in the same bridge as @dev.
+ */
+int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*add_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
+ foreign_dev_check_cb, add_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
+
static int __switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
int (*del_cb)(struct net_device *dev, const void *ctx,
const struct switchdev_obj *obj))
{
struct switchdev_notifier_info *info = &port_obj_info->info;
- struct net_device *lower_dev;
+ struct net_device *br, *lower_dev;
struct list_head *iter;
int err = -EOPNOTSUPP;
if (netif_is_bridge_master(lower_dev))
continue;
+ /* When searching for switchdev interfaces that are neighbors
+ * of foreign ones, and @dev is a bridge, do not recurse on the
+ * foreign interface again, it was already visited.
+ */
+ if (foreign_dev_check_cb && !check_cb(lower_dev) &&
+ !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
+ continue;
+
err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
- check_cb, del_cb);
+ check_cb, foreign_dev_check_cb,
+ del_cb);
if (err && err != -EOPNOTSUPP)
return err;
}
- return err;
+ /* Event is neither on a bridge nor a LAG. Check whether it is on an
+ * interface that is in a bridge with us.
+ */
+ if (!foreign_dev_check_cb)
+ return err;
+
+ br = netdev_master_upper_dev_get(dev);
+ if (!br || !netif_is_bridge_master(br))
+ return err;
+
+ if (!switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb))
+ return err;
+
+ return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
+ foreign_dev_check_cb, del_cb);
}
+/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
+ * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
+ * bridge or a LAG.
+ */
int switchdev_handle_port_obj_del(struct net_device *dev,
struct switchdev_notifier_port_obj_info *port_obj_info,
bool (*check_cb)(const struct net_device *dev),
int err;
err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
- del_cb);
+ NULL, del_cb);
if (err == -EOPNOTSUPP)
err = 0;
return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
+/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
+ * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
+ * that pass @check_cb and are in the same bridge as @dev.
+ */
+int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
+ struct switchdev_notifier_port_obj_info *port_obj_info,
+ bool (*check_cb)(const struct net_device *dev),
+ bool (*foreign_dev_check_cb)(const struct net_device *dev,
+ const struct net_device *foreign_dev),
+ int (*del_cb)(struct net_device *dev, const void *ctx,
+ const struct switchdev_obj *obj))
+{
+ int err;
+
+ err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
+ foreign_dev_check_cb, del_cb);
+ if (err == -EOPNOTSUPP)
+ err = 0;
+ return err;
+}
+EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
+
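A driver that wants the foreign replication behaviour opts in by calling the new *_foreign variants from its blocking switchdev notifier and supplying both predicates. A hypothetical driver-side sketch (my_dev_check(), my_foreign_check() and my_port_obj_add() stand in for real driver callbacks):

    #include <net/switchdev.h>

    static bool my_dev_check(const struct net_device *dev);
    static bool my_foreign_check(const struct net_device *dev,
                                 const struct net_device *foreign_dev);
    static int my_port_obj_add(struct net_device *dev, const void *ctx,
                               const struct switchdev_obj *obj,
                               struct netlink_ext_ack *extack);

    static int my_switchdev_blocking_event(struct notifier_block *nb,
                                           unsigned long event, void *ptr)
    {
        struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
        int err;

        switch (event) {
        case SWITCHDEV_PORT_OBJ_ADD:
            err = switchdev_handle_port_obj_add_foreign(dev, ptr,
                                                        my_dev_check,
                                                        my_foreign_check,
                                                        my_port_obj_add);
            return notifier_from_errno(err);
        }
        return NOTIFY_DONE;
    }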
static int __switchdev_handle_port_attr_set(struct net_device *dev,
struct switchdev_notifier_port_attr_info *port_attr_info,
bool (*check_cb)(const struct net_device *dev),
struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
struct tipc_aead_key *skey = NULL;
u16 key_gen = msg_key_gen(hdr);
- u16 size = msg_data_sz(hdr);
+ u32 size = msg_data_sz(hdr);
u8 *data = msg_data(hdr);
unsigned int keylen;
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
- u16 dlen = msg_data_sz(hdr), glen = 0;
+ u32 dlen = msg_data_sz(hdr), glen = 0;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
void *data;
trace_tipc_proto_rcv(skb, false, l->name);
+
+ if (dlen > U16_MAX)
+ goto exit;
+
if (tipc_link_is_blocked(l) || !xmitq)
goto exit;
/* Receive Gap ACK blocks from peer if any */
glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
-
+ if(glen > dlen)
+ break;
tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
&l->mon_state, l->bearer_id);
state->probing = false;
/* Sanity check received domain record */
+ if (new_member_cnt > MAX_MON_DOMAIN)
+ return;
if (dlen < dom_rec_len(arrv_dom, 0))
return;
if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
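The u16 -> u32 widenings above all close the same hole: the on-wire size field can exceed what a u16 holds (hence the new dlen > U16_MAX check), and storing it in a u16 truncates silently, so a later bounds check compares against a much smaller number than what is actually parsed. The truncation itself, demonstrated in isolation:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t wire_len = 0x10010;            /* 65552 bytes claimed */
        uint16_t dlen = (uint16_t)wire_len;     /* silently becomes 0x0010 */

        assert(dlen == 16);    /* later checks now validate only 16 bytes */
        return 0;
    }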
pr_warn_ratelimited("Failed to remove binding %u,%u from %u\n",
ua.sr.type, ua.sr.lower, node);
} else {
- pr_warn("Unrecognized name table message received\n");
+ pr_warn_ratelimited("Unknown name table message received\n");
}
return false;
}
u32 flags = n->action_flags;
struct list_head *publ_list;
struct tipc_uaddr ua;
- u32 bearer_id;
+ u32 bearer_id, node;
if (likely(!flags)) {
write_unlock_bh(&n->lock);
tipc_uaddr(&ua, TIPC_SERVICE_RANGE, TIPC_NODE_SCOPE,
TIPC_LINK_STATE, n->addr, n->addr);
sk.ref = n->link_id;
- sk.node = n->addr;
+ sk.node = tipc_own_addr(net);
+ node = n->addr;
bearer_id = n->link_id & 0xffff;
publ_list = &n->publ_list;
write_unlock_bh(&n->lock);
if (flags & TIPC_NOTIFY_NODE_DOWN)
- tipc_publ_notify(net, publ_list, sk.node, n->capabilities);
+ tipc_publ_notify(net, publ_list, node, n->capabilities);
if (flags & TIPC_NOTIFY_NODE_UP)
- tipc_named_node_up(net, sk.node, n->capabilities);
+ tipc_named_node_up(net, node, n->capabilities);
if (flags & TIPC_NOTIFY_LINK_UP) {
- tipc_mon_peer_up(net, sk.node, bearer_id);
+ tipc_mon_peer_up(net, node, bearer_id);
tipc_nametbl_publish(net, &ua, &sk, sk.ref);
}
if (flags & TIPC_NOTIFY_LINK_DOWN) {
- tipc_mon_peer_down(net, sk.node, bearer_id);
+ tipc_mon_peer_down(net, node, bearer_id);
tipc_nametbl_withdraw(net, &ua, &sk, sk.ref);
}
}
sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
sock->state = SS_UNCONNECTED;
vsock_transport_cancel_pkt(vsk);
+ vsock_remove_connected(vsk);
goto out_wait;
} else if (timeout == 0) {
err = -ETIMEDOUT;
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2021 Intel Corporation
+ * Copyright (C) 2018-2022 Intel Corporation
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
{
struct wireless_dev *wdev, *tmp;
- bool found = false;
ASSERT_RTNL();
- list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
+ list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
if (wdev->nl_owner_dead) {
if (wdev->netdev)
dev_close(wdev->netdev);
- found = true;
- }
- }
-
- if (!found)
- return;
- wiphy_lock(&rdev->wiphy);
- list_for_each_entry_safe(wdev, tmp, &rdev->wiphy.wdev_list, list) {
- if (wdev->nl_owner_dead) {
+ wiphy_lock(&rdev->wiphy);
cfg80211_leave(rdev, wdev);
rdev_del_virtual_intf(rdev, wdev);
+ wiphy_unlock(&rdev->wiphy);
}
}
- wiphy_unlock(&rdev->wiphy);
}
static void cfg80211_destroy_iface_wk(struct work_struct *work)
/*
* Copyright (C) 2018 - 2021 Intel Corporation
*/
-#ifndef __PMSR_H
-#define __PMSR_H
#include <net/cfg80211.h>
#include "core.h"
#include "nl80211.h"
}
spin_unlock_bh(&wdev->pmsr_lock);
}
-
-#endif /* __PMSR_H */
for (i = 0; i < request->n_ssids; i++) {
/* wildcard ssid in the scan request */
- if (!request->ssids[i].ssid_len)
+ if (!request->ssids[i].ssid_len) {
+ if (ap->multi_bss && !ap->transmitted_bssid)
+ continue;
+
return true;
+ }
if (ap->ssid_len &&
ap->ssid_len == request->ssids[i].ssid_len) {
!cfg80211_find_ssid_match(ap, request))
continue;
+ if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
+ continue;
+
cfg80211_scan_req_add_chan(request, chan, true);
memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN);
scan_6ghz_params->short_ssid = ap->short_ssid;
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/export.h>
#include <linux/bitops.h>
if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) &&
tmp.h_proto != htons(ETH_P_AARP) &&
tmp.h_proto != htons(ETH_P_IPX)) ||
- ether_addr_equal(payload.hdr, bridge_tunnel_header)))
+ ether_addr_equal(payload.hdr, bridge_tunnel_header))) {
/* remove RFC1042 or Bridge-Tunnel encapsulation and
* replace EtherType */
hdrlen += ETH_ALEN + 2;
- else
+ skb_postpull_rcsum(skb, &payload, ETH_ALEN + 2);
+ } else {
tmp.h_proto = htons(skb->len - hdrlen);
+ }
pskb_pull(skb, hdrlen);
#include <sys/prctl.h>
#include <unistd.h>
-static int install_filter(int nr, int arch, int error)
+static int install_filter(int arch, int nr, int error)
{
struct sock_filter filter[] = {
BPF_STMT(BPF_LD+BPF_W+BPF_ABS,
.len = (unsigned short)(sizeof(filter)/sizeof(filter[0])),
.filter = filter,
};
+ if (error == -1) {
+ struct sock_filter kill = BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL);
+ filter[4] = kill;
+ }
if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
perror("prctl(NO_NEW_PRIVS)");
return 1;
{
if (argc < 5) {
fprintf(stderr, "Usage:\n"
- "dropper <syscall_nr> <arch> <errno> <prog> [<args>]\n"
+ "dropper <arch> <syscall_nr> <errno> <prog> [<args>]\n"
"Hint: AUDIT_ARCH_I386: 0x%X\n"
" AUDIT_ARCH_X86_64: 0x%X\n"
+ " errno == -1 means SECCOMP_RET_KILL\n"
"\n", AUDIT_ARCH_I386, AUDIT_ARCH_X86_64);
return 1;
}
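With the argument order corrected to match install_filter(), and -1 now selecting SECCOMP_RET_KILL instead of an errno, a plausible invocation on x86-64 (arch value from the usage text; 39 is assumed to be __NR_getpid on this ABI) would be:

    ./dropper 0xC000003E 39 -1 ./victim

which kills ./victim on its first getpid() rather than merely failing the call.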
KBUILD_CFLAGS += -Wno-format-zero-length
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
+KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
endif
endif
fprintf(out, "\n$(deps_config): ;\n");
- if (ferror(out)) /* error check for all fprintf() calls */
- return -1;
-
+ ret = ferror(out); /* error check for all fprintf() calls */
fclose(out);
+ if (ret)
+ return -1;
if (rename(tmp, name)) {
perror("rename");
static int conf_touch_deps(void)
{
- const char *name;
+ const char *name, *tmp;
struct symbol *sym;
int res, i;
- strcpy(depfile_path, "include/config/");
- depfile_prefix_len = strlen(depfile_path);
-
name = conf_get_autoconfig_name();
+ tmp = strrchr(name, '/');
+ depfile_prefix_len = tmp ? tmp - name + 1 : 0;
+ if (depfile_prefix_len + 1 > sizeof(depfile_path))
+ return -1;
+
+ strncpy(depfile_path, name, depfile_prefix_len);
+ depfile_path[depfile_prefix_len] = 0;
+
conf_read_simple(name, S_DEF_AUTO);
sym_calc_value(modules_sym);
print_symbol(file, sym);
/* check possible errors in conf_write_heading() and print_symbol() */
- if (ferror(file))
- return -1;
-
+ ret = ferror(file);
fclose(file);
+ if (ret)
+ return -1;
if (rename(tmp, filename)) {
perror("rename");
static char *do_shell(int argc, char *argv[])
{
FILE *p;
- char buf[256];
+ char buf[4096];
char *cmd;
size_t nread;
int i;
pk = asymmetric_key_public_key(key);
pks.pkey_algo = pk->pkey_algo;
- if (!strcmp(pk->pkey_algo, "rsa"))
+ if (!strcmp(pk->pkey_algo, "rsa")) {
pks.encoding = "pkcs1";
- else if (!strncmp(pk->pkey_algo, "ecdsa-", 6))
+ } else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) {
/* edcsa-nist-p192 etc. */
pks.encoding = "x962";
- else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
- !strcmp(pk->pkey_algo, "sm2"))
+ } else if (!strcmp(pk->pkey_algo, "ecrdsa") ||
+ !strcmp(pk->pkey_algo, "sm2")) {
pks.encoding = "raw";
- else
- return -ENOPKG;
+ } else {
+ ret = -ENOPKG;
+ goto out;
+ }
pks.digest = (u8 *)data;
pks.digest_size = datalen;
pks.s = hdr->sig;
pks.s_size = siglen;
ret = verify_signature(key, &pks);
+out:
key_put(key);
pr_debug("%s() = %d\n", __func__, ret);
return ret;
return 0;
out:
+ securityfs_remove(ima_policy);
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_symlink);
securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
return -1;
}
rcu_read_lock();
+ /* Do not print rules with inactive LSM labels */
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (entry->lsm[i].args_p && !entry->lsm[i].rule) {
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+
if (entry->action & MEASURE)
seq_puts(m, pt(Opt_measure));
if (entry->action & DONT_MEASURE)
static LIST_HEAD(defined_templates);
static DEFINE_SPINLOCK(template_list);
+static int template_setup_done;
static const struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
struct ima_template_desc *template_desc;
int template_len = strlen(str);
- if (ima_template)
+ if (template_setup_done)
return 1;
- ima_init_template_list();
+ if (!ima_template)
+ ima_init_template_list();
/*
* Verify that a template with the supplied name exists.
}
ima_template = template_desc;
+ template_setup_done = 1;
return 1;
}
__setup("ima_template=", ima_template_setup);
{
int num_templates = ARRAY_SIZE(builtin_templates);
- if (ima_template)
+ if (template_setup_done)
return 1;
if (template_desc_init_fields(str, NULL, NULL) < 0) {
builtin_templates[num_templates - 1].fmt = str;
ima_template = builtin_templates + num_templates - 1;
+ template_setup_done = 1;
return 1;
}
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, audit_msgno);
+ if (!ab)
+ return;
audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
task_pid_nr(current),
from_kuid(&init_user_ns, current_uid()),
}
EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream)
+{
+ unsigned long flags = 0;
+ if (substream->pcm->nonatomic)
+ mutex_lock_nested(&substream->self_group.mutex,
+ SINGLE_DEPTH_NESTING);
+ else
+ spin_lock_irqsave_nested(&substream->self_group.lock, flags,
+ SINGLE_DEPTH_NESTING);
+ return flags;
+}
+EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested);
+
/**
* snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
* @substream: PCM substream
static int
sdw_intel_scan_controller(struct sdw_intel_acpi_info *info)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle);
int ret, i;
u8 count;
- if (acpi_bus_get_device(info->handle, &adev))
+ if (!adev)
return -EINVAL;
/* Found controller, find links supported */
void *cdata, void **return_value)
{
struct sdw_intel_acpi_info *info = cdata;
- struct acpi_device *adev;
acpi_status status;
u64 adr;
if (ACPI_FAILURE(status))
return AE_OK; /* keep going */
- if (acpi_bus_get_device(handle, &adev)) {
+ if (!acpi_fetch_acpi_dev(handle)) {
pr_err("%s: Couldn't find ACPI handle\n", __func__);
return AE_NOT_FOUND;
}
int id = HDA_FIXUP_ID_NOT_SET;
const char *name = NULL;
const char *type = NULL;
- int vendor, device;
+ unsigned int vendor, device;
if (codec->fixup_id != HDA_FIXUP_ID_NOT_SET)
return;
{
struct hda_pcm *cpcm;
+ /* Skip the shutdown if codec is not registered */
+ if (!codec->registered)
+ return;
+
list_for_each_entry(cpcm, &codec->pcm_list_head, list)
snd_pcm_suspend_all(cpcm->pcm);
free_kctls(spec);
snd_array_free(&spec->paths);
snd_array_free(&spec->loopback_list);
+#ifdef CONFIG_SND_HDA_GENERIC_LEDS
+ if (spec->led_cdevs[LED_AUDIO_MUTE])
+ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]);
+ if (spec->led_cdevs[LED_AUDIO_MICMUTE])
+ led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]);
+#endif
}
/*
enum led_brightness),
bool micmute)
{
+ struct hda_gen_spec *spec = codec->spec;
struct led_classdev *cdev;
+ int idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE;
+ int err;
cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL);
if (!cdev)
cdev->max_brightness = 1;
cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute";
cdev->brightness_set_blocking = callback;
- cdev->brightness = ledtrig_audio_get(micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE);
+ cdev->brightness = ledtrig_audio_get(idx);
cdev->flags = LED_CORE_SUSPENDRESUME;
- return devm_led_classdev_register(&codec->core.dev, cdev);
+ err = led_classdev_register(&codec->core.dev, cdev);
+ if (err < 0)
+ return err;
+ spec->led_cdevs[idx] = cdev;
+ return 0;
}
/**
struct hda_jack_callback *cb);
void (*mic_autoswitch_hook)(struct hda_codec *codec,
struct hda_jack_callback *cb);
+
+ /* leds */
+ struct led_classdev *led_cdevs[NUM_AUDIO_LEDS];
};
/* values for add_stereo_mix_input flag */
unsigned int gpio_mic_led_mask;
struct alc_coef_led mute_led_coef;
struct alc_coef_led mic_led_coef;
+ struct mutex coef_mutex;
hda_nid_t headset_mic_pin;
hda_nid_t headphone_mic_pin;
* COEF access helper functions
*/
-static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
- unsigned int coef_idx)
+static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx)
{
unsigned int val;
return val;
}
+static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx)
+{
+ struct alc_spec *spec = codec->spec;
+ unsigned int val;
+
+ mutex_lock(&spec->coef_mutex);
+ val = __alc_read_coefex_idx(codec, nid, coef_idx);
+ mutex_unlock(&spec->coef_mutex);
+ return val;
+}
+
#define alc_read_coef_idx(codec, coef_idx) \
alc_read_coefex_idx(codec, 0x20, coef_idx)
-static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
- unsigned int coef_idx, unsigned int coef_val)
+static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int coef_val)
{
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx);
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val);
}
+static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int coef_val)
+{
+ struct alc_spec *spec = codec->spec;
+
+ mutex_lock(&spec->coef_mutex);
+ __alc_write_coefex_idx(codec, nid, coef_idx, coef_val);
+ mutex_unlock(&spec->coef_mutex);
+}
+
#define alc_write_coef_idx(codec, coef_idx, coef_val) \
alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val)
+static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
+ unsigned int coef_idx, unsigned int mask,
+ unsigned int bits_set)
+{
+ unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx);
+
+ if (val != -1)
+ __alc_write_coefex_idx(codec, nid, coef_idx,
+ (val & ~mask) | bits_set);
+}
+
static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid,
unsigned int coef_idx, unsigned int mask,
unsigned int bits_set)
{
- unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx);
+ struct alc_spec *spec = codec->spec;
- if (val != -1)
- alc_write_coefex_idx(codec, nid, coef_idx,
- (val & ~mask) | bits_set);
+ mutex_lock(&spec->coef_mutex);
+ __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set);
+ mutex_unlock(&spec->coef_mutex);
}
#define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \
static void alc_process_coef_fw(struct hda_codec *codec,
const struct coef_fw *fw)
{
+ struct alc_spec *spec = codec->spec;
+
+ mutex_lock(&spec->coef_mutex);
for (; fw->nid; fw++) {
if (fw->mask == (unsigned short)-1)
- alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
+ __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val);
else
- alc_update_coefex_idx(codec, fw->nid, fw->idx,
- fw->mask, fw->val);
+ __alc_update_coefex_idx(codec, fw->nid, fw->idx,
+ fw->mask, fw->val);
}
+ mutex_unlock(&spec->coef_mutex);
}
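Editor's note: the double-underscore split above is the usual kernel locking idiom — __helpers assume the caller holds the lock, public wrappers take it. That lets a compound sequence such as alc_process_coef_fw() hold coef_mutex across the whole COEF batch without deadlocking on its own read/write helpers. In generic form (all names illustrative):

	static int __foo_read(struct foo *f, int idx);	/* lock held by caller */

	static int foo_read(struct foo *f, int idx)	/* public wrapper */
	{
		int val;

		mutex_lock(&f->lock);
		val = __foo_read(f, idx);
		mutex_unlock(&f->lock);
		return val;
	}

	static void foo_batch(struct foo *f, const int *idxs, int n)
	{
		mutex_lock(&f->lock);		/* one lock for the whole batch */
		while (n--)
			__foo_read(f, *idxs++);	/* must not retake f->lock */
		mutex_unlock(&f->lock);
	}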
/*
codec->spdif_status_reset = 1;
codec->forced_resume = 1;
codec->patch_ops = alc_patch_ops;
+ mutex_init(&spec->coef_mutex);
err = alc_codec_rename_from_preset(codec);
if (err < 0) {
{
static const hda_nid_t conn1[] = { 0x0c };
static const struct coef_fw gb_x570_coefs[] = {
+ WRITE_COEF(0x07, 0x03c0),
WRITE_COEF(0x1a, 0x01c1),
WRITE_COEF(0x1b, 0x0202),
WRITE_COEF(0x43, 0x3005),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570),
- SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570),
+ SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570),
SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
{.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"},
{.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"},
{.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"},
+ {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"},
{.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"},
{}
};
SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
static struct snd_soc_codec_conf rt1019_conf[] = {
{
- .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"),
+ .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"),
.name_prefix = "Left",
},
{
- .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"),
+ .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"),
.name_prefix = "Right",
},
};
{
struct device_node *codec_node =
of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec");
+ if (!codec_node)
+ return -ENODEV;
pdev->dev.of_node = codec_node;
bool busy;
struct snd_soc_jack *jack;
unsigned int jack_status;
- u8 iec_status[5];
+ u8 iec_status[AES_IEC958_STATUS_SIZE];
};
static const struct snd_soc_dapm_widget hdmi_widgets[] = {
int reg, b2_reg;
/* Address does not automatically update if reading */
- reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx;
- b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx;
+ reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx;
+ b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx;
snd_soc_component_write(component, reg,
((band_idx * BAND_MAX + coeff_idx) *
static void set_iir_band_coeff(struct snd_soc_component *component,
int iir_idx, int band_idx, uint32_t value)
{
- int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx;
+ int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx;
snd_soc_component_write(component, reg, (value & 0xFF));
snd_soc_component_write(component, reg, (value >> 8) & 0xFF);
int iir_idx = ctl->iir_idx;
int band_idx = ctl->band_idx;
u32 coeff[BAND_MAX];
- int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx;
+ int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx;
memcpy(&coeff[0], ucontrol->value.bytes.data, params->max);
struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol);
struct max9759 *priv = snd_soc_component_get_drvdata(c);
- if (ucontrol->value.integer.value[0] > 3)
+ if (ucontrol->value.integer.value[0] < 0 ||
+ ucontrol->value.integer.value[0] > 3)
return -EINVAL;
priv->gain = ucontrol->value.integer.value[0];
struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv,
jd_check_work.work);
- if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
- & RT5682_JDH_RS_MASK) {
+ if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) & RT5682_JDH_RS_MASK)
/* jack out */
- rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0);
-
- snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
- SND_JACK_HEADSET |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
- } else {
+ mod_delayed_work(system_power_efficient_wq,
+ &rt5682->jack_detect_work, 0);
+ else
schedule_delayed_work(&rt5682->jd_check_work, 500);
- }
}
static irqreturn_t rt5682_irq(int irq, void *data)
}
mutex_init(&rt5682->calibrate_mutex);
- mutex_init(&rt5682->jdet_mutex);
rt5682_calibrate(rt5682);
rt5682_apply_patch_list(rt5682, &i2c->dev);
*
* Returns detect status.
*/
-int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
+static int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
{
struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
struct snd_soc_dapm_context *dapm = &component->dapm;
unsigned int val, count;
if (jack_insert) {
- snd_soc_dapm_mutex_lock(dapm);
-
snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1,
RT5682_PWR_VREF2 | RT5682_PWR_MB,
RT5682_PWR_VREF2 | RT5682_PWR_MB);
snd_soc_component_update_bits(component, RT5682_MICBIAS_2,
RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK,
RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU);
-
- snd_soc_dapm_mutex_unlock(dapm);
} else {
rt5682_enable_push_button_irq(component, false);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
dev_dbg(component->dev, "jack_type = %d\n", rt5682->jack_type);
return rt5682->jack_type;
}
-EXPORT_SYMBOL_GPL(rt5682_headset_detect);
static int rt5682_set_jack_detect(struct snd_soc_component *component,
struct snd_soc_jack *hs_jack, void *data)
{
struct rt5682_priv *rt5682 =
container_of(work, struct rt5682_priv, jack_detect_work.work);
+ struct snd_soc_dapm_context *dapm;
int val, btn_type;
while (!rt5682->component)
while (!rt5682->component->card->instantiated)
usleep_range(10000, 15000);
- mutex_lock(&rt5682->jdet_mutex);
+ dapm = snd_soc_component_get_dapm(rt5682->component);
+
+ snd_soc_dapm_mutex_lock(dapm);
mutex_lock(&rt5682->calibrate_mutex);
val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
rt5682->irq_work_delay_time = 50;
}
+ mutex_unlock(&rt5682->calibrate_mutex);
+ snd_soc_dapm_mutex_unlock(dapm);
+
snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type,
SND_JACK_HEADSET |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
else
cancel_delayed_work_sync(&rt5682->jd_check_work);
}
-
- mutex_unlock(&rt5682->calibrate_mutex);
- mutex_unlock(&rt5682->jdet_mutex);
}
EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
- struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
RT5682_DEPOP_1, 0x60, 0x60);
snd_soc_component_update_bits(component,
RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
-
- mutex_lock(&rt5682->jdet_mutex);
-
snd_soc_component_update_bits(component, RT5682_HP_CTRL_2,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN);
usleep_range(5000, 10000);
snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1,
RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L);
-
- mutex_unlock(&rt5682->jdet_mutex);
break;
case SND_SOC_DAPM_POST_PMD:
int jack_type;
int irq_work_delay_time;
- struct mutex jdet_mutex;
};
extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev);
-int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert);
void rt5682_jack_detect_handler(struct work_struct *work);
bool rt5682_volatile_register(struct device *dev, unsigned int reg);
return 0;
}
-static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 ch_id, u8 enable)
+static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 port_num, u8 ch_id, u8 enable)
{
- u8 port_num;
-
- port_num = wcd->ch_info[ch_id].port_num;
-
return wcd938x_sdw_connect_port(&wcd->ch_info[ch_id],
- &wcd->port_config[port_num],
+ &wcd->port_config[port_num - 1],
enable);
}
WCD938X_EAR_GAIN_MASK,
ucontrol->value.integer.value[0]);
- return 0;
+ return 1;
}
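Editor's note: the switch from return 0 to return 1 matters for ALSA control semantics. A .put handler returns 1 when the value actually changed (so the core broadcasts a value-change event to listeners), 0 when nothing changed, and a negative errno on bad input. A minimal handler following that contract (names illustrative):

	static int example_put(struct snd_kcontrol *kc,
			       struct snd_ctl_elem_value *uc)
	{
		struct example_priv *p = snd_kcontrol_chip(kc);
		long val = uc->value.integer.value[0];

		if (val < 0 || val > 3)
			return -EINVAL;	/* rejected, nothing written */
		if (p->gain == val)
			return 0;	/* unchanged, no notification */
		p->gain = val;
		return 1;		/* changed, core emits a ctl event */
	}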
static int wcd938x_get_compander(struct snd_kcontrol *kcontrol,
struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component);
struct wcd938x_sdw_priv *wcd;
int value = ucontrol->value.integer.value[0];
+ int portidx;
struct soc_mixer_control *mc;
bool hphr;
else
wcd938x->comp1_enable = value;
+ portidx = wcd->ch_info[mc->reg].port_num;
+
if (value)
- wcd938x_connect_port(wcd, mc->reg, true);
+ wcd938x_connect_port(wcd, portidx, mc->reg, true);
else
- wcd938x_connect_port(wcd, mc->reg, false);
+ wcd938x_connect_port(wcd, portidx, mc->reg, false);
- return 0;
+ return 1;
}
static int wcd938x_ldoh_get(struct snd_kcontrol *kcontrol,
struct wcd938x_sdw_priv *wcd;
struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value;
int dai_id = mixer->shift;
- int portidx = mixer->reg;
+ int portidx, ch_idx = mixer->reg;
+
wcd = wcd938x->sdw_priv[dai_id];
+ portidx = wcd->ch_info[ch_idx].port_num;
ucontrol->value.integer.value[0] = wcd->port_enable[portidx];
struct wcd938x_sdw_priv *wcd;
struct soc_mixer_control *mixer =
(struct soc_mixer_control *)kcontrol->private_value;
- int portidx = mixer->reg;
+ int ch_idx = mixer->reg;
+ int portidx;
int dai_id = mixer->shift;
bool enable;
wcd = wcd938x->sdw_priv[dai_id];
+ portidx = wcd->ch_info[ch_idx].port_num;
if (ucontrol->value.integer.value[0])
enable = true;
else
wcd->port_enable[portidx] = enable;
- wcd938x_connect_port(wcd, portidx, enable);
+ wcd938x_connect_port(wcd, portidx, ch_idx, enable);
- return 0;
+ return 1;
}
dev_err(&op->dev, "platform_device_alloc() failed\n");
ret = platform_device_add(pdata->codec_device);
- if (ret)
+ if (ret) {
dev_err(&op->dev, "platform_device_add() failed: %d\n", ret);
+ platform_device_put(pdata->codec_device);
+ }
ret = snd_soc_register_card(card);
- if (ret)
+ if (ret) {
dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret);
+ platform_device_del(pdata->codec_device);
+ platform_device_put(pdata->codec_device);
+ }
platform_set_drvdata(op, pdata);
-
return ret;
+
}
static int pcm030_fabric_remove(struct platform_device *op)
.hw_params = asoc_simple_hw_params,
};
+static int asoc_simple_parse_platform(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc)
+{
+ struct of_phandle_args args;
+ int ret;
+
+ if (!node)
+ return 0;
+
+	/*
+	 * Get the node via "sound-dai = <&phandle port>";
+	 * it will be used as xxx_of_node in soc_bind_dai_link().
+	 */
+ ret = of_parse_phandle_with_args(node, DAI, CELL, 0, &args);
+ if (ret)
+ return ret;
+
+ /* dai_name is not required and may not exist for plat component */
+
+ dlc->of_node = args.np;
+
+ return 0;
+}
+
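Editor's note: asoc_simple_parse_platform() resolves the phandle only; DAI and CELL are simple-card's existing shorthands for the "sound-dai" property and "#sound-dai-cells" cell count. A hedged sketch of the underlying call, assuming a node carrying sound-dai = <&audio_dma 0>:

	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(node, "sound-dai",
					 "#sound-dai-cells", 0, &args);
	if (ret)
		return ret;

	/* a platform (DMA) component is matched by node alone, so no
	 * DAI-name lookup is needed here */
	dlc->of_node = args.np;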
static int asoc_simple_parse_dai(struct device_node *node,
struct snd_soc_dai_link_component *dlc,
int *is_single_link)
if (ret < 0)
goto dai_link_of_err;
- ret = asoc_simple_parse_dai(plat, platforms, NULL);
+ ret = asoc_simple_parse_platform(plat, platforms);
if (ret < 0)
goto dai_link_of_err;
config SND_SOC_MT8195_MT6359_RT1011_RT5682
tristate "ASoC Audio driver for MT8195 with MT6359 RT1011 RT5682 codec"
- depends on I2C
+ depends on I2C && GPIOLIB
depends on SND_SOC_MT8195 && MTK_PMIC_WRAP
select SND_SOC_MT6359
select SND_SOC_RT1011
struct snd_pcm_runtime *runtime = substream->runtime;
struct q6apm_dai_rtd *prtd = runtime->private_data;
- q6apm_graph_stop(prtd->graph);
- q6apm_unmap_memory_regions(prtd->graph, substream->stream);
+ if (prtd->state) { /* only stop graph that is started */
+ q6apm_graph_stop(prtd->graph);
+ q6apm_unmap_memory_regions(prtd->graph, substream->stream);
+ }
+
q6apm_graph_close(prtd->graph);
prtd->graph = NULL;
kfree(prtd);
static acpi_status snd_soc_acpi_find_package(acpi_handle handle, u32 level,
void *context, void **ret)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
acpi_status status;
struct snd_soc_acpi_package_context *pkg_ctx = context;
pkg_ctx->data_valid = false;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
-
- if (adev->status.present && adev->status.functional) {
+ if (adev && adev->status.present && adev->status.functional) {
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *myobj = NULL;
if (sign_bit)
mask = BIT(sign_bit + 1) - 1;
- val = ((ucontrol->value.integer.value[0] + min) & mask);
+ if (ucontrol->value.integer.value[0] < 0)
+ return -EINVAL;
+ val = ucontrol->value.integer.value[0];
+ if (mc->platform_max && val > mc->platform_max)
+ return -EINVAL;
+ if (val > max - min)
+ return -EINVAL;
+ val = (val + min) & mask;
if (invert)
val = max - val;
val_mask = mask << shift;
val = val << shift;
if (snd_soc_volsw_is_stereo(mc)) {
- val2 = ((ucontrol->value.integer.value[1] + min) & mask);
+ if (ucontrol->value.integer.value[1] < 0)
+ return -EINVAL;
+ val2 = ucontrol->value.integer.value[1];
+ if (mc->platform_max && val2 > mc->platform_max)
+ return -EINVAL;
+ if (val2 > max - min)
+ return -EINVAL;
+ val2 = (val2 + min) & mask;
if (invert)
val2 = max - val2;
if (reg == reg2) {
int err = 0;
unsigned int val, val_mask;
+ if (ucontrol->value.integer.value[0] < 0)
+ return -EINVAL;
+ val = ucontrol->value.integer.value[0];
+ if (mc->platform_max && val > mc->platform_max)
+ return -EINVAL;
+ if (val > max - min)
+ return -EINVAL;
val_mask = mask << shift;
- val = (ucontrol->value.integer.value[0] + min) & mask;
+ val = (val + min) & mask;
val = val << shift;
err = snd_soc_component_update_bits(component, reg, val_mask, val);
long val = ucontrol->value.integer.value[0];
unsigned int i;
+ if (val < mc->min || val > mc->max)
+ return -EINVAL;
if (invert)
val = max - val;
val &= mask;
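Editor's note: these hunks all add the same kind of guard — validate the raw userspace value against the control's declared range before offsetting and masking, since "(val + min) & mask" would otherwise wrap out-of-range input into a silently wrong register value. The common shape, extracted as an illustrative helper (the real checks are open-coded per call site):

	static int volsw_check(long val, int min, int max, int platform_max)
	{
		if (val < 0)
			return -EINVAL;		/* negative long */
		if (platform_max && val > platform_max)
			return -EINVAL;		/* above platform limit */
		if (val > max - min)
			return -EINVAL;		/* outside control range */
		return 0;
	}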
snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}
-#define snd_soc_dpcm_stream_lock_irqsave(rtd, stream, flags) \
- snd_pcm_stream_lock_irqsave(snd_soc_dpcm_get_substream(rtd, stream), flags)
+#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \
+ snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags)
static inline void snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd,
int stream)
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
+ LIST_HEAD(deleted_dpcms);
snd_soc_dpcm_mutex_assert_held(fe);
/* BEs still alive need new FE */
dpcm_be_reparent(fe, dpcm->be, stream);
- dpcm_remove_debugfs_state(dpcm);
-
list_del(&dpcm->list_be);
+ list_move(&dpcm->list_fe, &deleted_dpcms);
+ }
+ snd_soc_dpcm_stream_unlock_irq(fe, stream);
+
+ while (!list_empty(&deleted_dpcms)) {
+ dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm,
+ list_fe);
list_del(&dpcm->list_fe);
+ dpcm_remove_debugfs_state(dpcm);
kfree(dpcm);
}
- snd_soc_dpcm_stream_unlock_irq(fe, stream);
}
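Editor's note: the rework above is the standard deferred-free pattern. kfree() and the debugfs teardown must not run inside the stream spinlock with interrupts disabled, so entries are unlinked under the lock onto a local list and reaped afterwards. In generic form (types and predicate illustrative):

	LIST_HEAD(reaped);
	struct item *p, *tmp;

	spin_lock_irq(&lock);
	list_for_each_entry_safe(p, tmp, &active, node)
		if (should_remove(p))
			list_move(&p->node, &reaped);	/* unlink only */
	spin_unlock_irq(&lock);

	while (!list_empty(&reaped)) {			/* free outside the lock */
		p = list_first_entry(&reaped, struct item, node);
		list_del(&p->node);
		kfree(p);
	}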
/* get BE for DAI widget and stream */
be = dpcm->be;
be_substream = snd_soc_dpcm_get_substream(be, stream);
- snd_soc_dpcm_stream_lock_irqsave(be, stream, flags);
+ snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags);
/* is this op for this BE ? */
if (!snd_soc_dpcm_be_can_update(fe, be, stream))
#define XLNX_AUD_XFER_COUNT 0x28
#define XLNX_AUD_CH_STS_START 0x2C
#define XLNX_BYTES_PER_CH 0x44
+#define XLNX_AUD_ALIGN_BYTES 64
#define AUD_STS_IOC_IRQ_MASK BIT(31)
#define AUD_STS_CH_STS_MASK BIT(29)
snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware);
runtime->private_data = stream_data;
- /* Resize the period size divisible by 64 */
+	/* Make the period size in bytes divisible by 64 */
err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64);
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+ XLNX_AUD_ALIGN_BYTES);
if (err) {
dev_err(component->dev,
- "unable to set constraint on period bytes\n");
+ "Unable to set constraint on period bytes\n");
+ return err;
+ }
+
+	/* Make the buffer size in bytes divisible by 64 */
+ err = snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+ XLNX_AUD_ALIGN_BYTES);
+ if (err) {
+ dev_err(component->dev,
+ "Unable to set constraint on buffer bytes\n");
+ return err;
+ }
+
+	/* Constrain the period count to integer values */
+ err = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0) {
+ dev_err(component->dev,
+ "Unable to set constraint on periods to be integer\n");
return err;
}
usb_audio_err(chip,
"cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
UAC_GET_CUR, validx, idx, cval->val_type);
+
+ if (val)
+ *val = 0;
+
return filter_error(cval, ret);
}
* combination.
*/
{
- USB_DEVICE(0x041e, 0x4095),
+ USB_AUDIO_DEVICE(0x041e, 0x4095),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
.ifnum = QUIRK_ANY_INTERFACE,
.type = QUIRK_COMPOSITE,
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
-/* Available with KVM_CAP_XSAVE2 */
-#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
-
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
#endif /* __LINUX_KVM_H */
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
*/
__u64 sig_data;
};
/* hop level */
#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
-#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
-#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
-#define PERF_MEM_HOPS_3 0x04 /* remote board */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
#endif /* _LINUX_PRCTL_H */
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, the 'msbits' hardware parameter is often used to
+ * deliver the available bit count in the most significant bits. This covers
+ * so-called 'left-justified' or 'right-padded' samples that are narrower
+ * than 32 bits.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
-
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
#define __LIBPERF_INTERNAL_CPUMAP_H
#include <linux/refcount.h>
-
-/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
-struct perf_cpu {
- int cpu;
-};
+#include <perf/cpumap.h>
/**
* A sized, reference counted, sorted array of integers representing CPU
#include <stdio.h>
#include <stdbool.h>
+/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
+struct perf_cpu {
+ int cpu;
+};
+
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
global:
libperf_init;
perf_cpu_map__dummy_new;
+ perf_cpu_map__default_new;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
#include <internal/lib.h>
#include <linux/kernel.h>
#include <linux/math64.h>
+#include <linux/stringify.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
return low | ((u64)high) << 32;
}
+#elif defined(__aarch64__)
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+static u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+#define PMEVCNTR_READ(idx) \
+ static u64 read_pmevcntr_##idx(void) { \
+ return read_sysreg(pmevcntr##idx##_el0); \
+ }
+
+PMEVCNTR_READ(0);
+PMEVCNTR_READ(1);
+PMEVCNTR_READ(2);
+PMEVCNTR_READ(3);
+PMEVCNTR_READ(4);
+PMEVCNTR_READ(5);
+PMEVCNTR_READ(6);
+PMEVCNTR_READ(7);
+PMEVCNTR_READ(8);
+PMEVCNTR_READ(9);
+PMEVCNTR_READ(10);
+PMEVCNTR_READ(11);
+PMEVCNTR_READ(12);
+PMEVCNTR_READ(13);
+PMEVCNTR_READ(14);
+PMEVCNTR_READ(15);
+PMEVCNTR_READ(16);
+PMEVCNTR_READ(17);
+PMEVCNTR_READ(18);
+PMEVCNTR_READ(19);
+PMEVCNTR_READ(20);
+PMEVCNTR_READ(21);
+PMEVCNTR_READ(22);
+PMEVCNTR_READ(23);
+PMEVCNTR_READ(24);
+PMEVCNTR_READ(25);
+PMEVCNTR_READ(26);
+PMEVCNTR_READ(27);
+PMEVCNTR_READ(28);
+PMEVCNTR_READ(29);
+PMEVCNTR_READ(30);
+
+/*
+ * Read a value direct from PMEVCNTR<idx>
+ */
+static u64 read_perf_counter(unsigned int counter)
+{
+ static u64 (* const read_f[])(void) = {
+ read_pmevcntr_0,
+ read_pmevcntr_1,
+ read_pmevcntr_2,
+ read_pmevcntr_3,
+ read_pmevcntr_4,
+ read_pmevcntr_5,
+ read_pmevcntr_6,
+ read_pmevcntr_7,
+ read_pmevcntr_8,
+ read_pmevcntr_9,
+ read_pmevcntr_10,
+ read_pmevcntr_11,
+		read_pmevcntr_12,
+		read_pmevcntr_13,
+ read_pmevcntr_14,
+ read_pmevcntr_15,
+ read_pmevcntr_16,
+ read_pmevcntr_17,
+ read_pmevcntr_18,
+ read_pmevcntr_19,
+ read_pmevcntr_20,
+ read_pmevcntr_21,
+ read_pmevcntr_22,
+ read_pmevcntr_23,
+ read_pmevcntr_24,
+ read_pmevcntr_25,
+ read_pmevcntr_26,
+ read_pmevcntr_27,
+ read_pmevcntr_28,
+ read_pmevcntr_29,
+ read_pmevcntr_30,
+ read_pmccntr
+ };
+
+ if (counter < ARRAY_SIZE(read_f))
+ return (read_f[counter])();
+
+ return 0;
+}
+
+static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+
#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
static u64 read_timestamp(void) { return 0; }
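Editor's note: these helpers plug into libperf's self-monitoring read path; on arm64 the kernel additionally gates user access behind the config1 bit requested later in this series. A hedged sketch of the seqlock-style loop the raw counter value feeds into (fields as in struct perf_event_mmap_page; sign extension via pmc_width omitted for brevity):

	u32 seq, idx;
	u64 count;

	do {
		seq = pc->lock;
		barrier();
		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx)
			count += read_perf_counter(idx - 1);
		barrier();
	} while (pc->lock != seq);	/* retry if the kernel updated the page */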
int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
__T_START;
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
+ cpus = perf_cpu_map__default_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
__T_END;
return tests_failed == 0 ? 0 : -1;
}
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
min = counts[0].val;
for (i = 0; i < EVENT_NUM; i++) {
- __T_VERBOSE("Event %2d -- Raw count = %lu, run = %lu, enable = %lu\n",
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
i, counts[i].val, counts[i].run, counts[i].ena);
perf_counts_values__scale(&counts[i], true, &scaled);
if (scaled == 1) {
- __T_VERBOSE("\t Scaled count = %lu (%.2lf%%, %lu/%lu)\n",
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
counts[i].val,
(double)counts[i].run / (double)counts[i].ena * 100.0,
counts[i].run, counts[i].ena);
struct perf_event_attr attr = {
.type = PERF_TYPE_HARDWARE,
.config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
};
int err, i;
pc = perf_evsel__mmap_base(evsel, 0, 0);
__T("failed to get mmapped address", pc);
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
__T("userspace counter access not supported", pc->cap_user_rdpmc);
__T("userspace counter access not enabled", pc->index);
__T("userspace counter width not set", pc->pmc_width >= 32);
static inline void *xrealloc(void *ptr, size_t size)
{
void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
+ if (!ret)
+ die("Out of memory, realloc failed");
return ret;
}
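Editor's note: one subtlety the simplification leans on — for size 0, realloc() may legitimately return NULL, which the removed retry dance tolerated and the new code turns into die(). If zero-size calls must keep succeeding, clamping up front is simpler than retrying after the fact, e.g.:

	static inline void *xrealloc(void *ptr, size_t size)
	{
		void *ret = realloc(ptr, size ? size : 1);	/* never ask for 0 bytes */

		if (!ret)
			die("Out of memory, realloc failed");
		return ret;
	}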
static inline const char *call_dest_name(struct instruction *insn)
{
- static char pvname[16];
+ static char pvname[19];
struct reloc *rel;
int idx;
int cmd_ftrace(int argc, const char **argv)
{
int ret;
+ int (*cmd_func)(struct perf_ftrace *) = NULL;
struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
.target = { .uid = UINT_MAX, },
goto out_delete_filters;
}
+ switch (subcmd) {
+ case PERF_FTRACE_TRACE:
+ if (!argc && target__none(&ftrace.target))
+ ftrace.target.system_wide = true;
+ cmd_func = __cmd_ftrace;
+ break;
+ case PERF_FTRACE_LATENCY:
+ if (list_empty(&ftrace.filters)) {
+ pr_err("Should provide a function to measure\n");
+ parse_options_usage(ftrace_usage, options, "T", 1);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ cmd_func = __cmd_latency;
+ break;
+ case PERF_FTRACE_NONE:
+ default:
+ pr_err("Invalid subcommand\n");
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+
ret = target__validate(&ftrace.target);
if (ret) {
char errbuf[512];
goto out_delete_evlist;
}
- switch (subcmd) {
- case PERF_FTRACE_TRACE:
- if (!argc && target__none(&ftrace.target))
- ftrace.target.system_wide = true;
- ret = __cmd_ftrace(&ftrace);
- break;
- case PERF_FTRACE_LATENCY:
- if (list_empty(&ftrace.filters)) {
- pr_err("Should provide a function to measure\n");
- parse_options_usage(ftrace_usage, options, "T", 1);
- ret = -EINVAL;
- goto out_delete_evlist;
- }
- ret = __cmd_latency(&ftrace);
- break;
- case PERF_FTRACE_NONE:
- default:
- pr_err("Invalid subcommand\n");
- ret = -EINVAL;
- break;
- }
+ ret = cmd_func(&ftrace);
out_delete_evlist:
evlist__delete(ftrace.evlist);
return fprintf(fp, " ? ");
}
+static pid_t workload_pid = -1;
static bool done = false;
static bool interrupted = false;
-static void sig_handler(int sig)
+static void sighandler_interrupt(int sig __maybe_unused)
{
- done = true;
- interrupted = sig == SIGINT;
+ done = interrupted = true;
+}
+
+static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+ void *context __maybe_unused)
+{
+ if (info->si_pid == workload_pid)
+ done = true;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
bool draining = false;
trace->live = true;
- signal(SIGCHLD, sig_handler);
if (!trace->raw_augmented_syscalls) {
if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_evlist;
}
+ workload_pid = evlist->workload.pid;
}
err = evlist__open(evlist);
const char * const trace_subcommands[] = { "record", NULL };
int err = -1;
char bf[BUFSIZ];
+ struct sigaction sigchld_act;
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
- signal(SIGINT, sig_handler);
+ signal(SIGINT, sighandler_interrupt);
+
+ memset(&sigchld_act, 0, sizeof(sigchld_act));
+ sigchld_act.sa_flags = SA_SIGINFO;
+ sigchld_act.sa_sigaction = sighandler_chld;
+ sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
trace.sctbl = syscalltbl__new();
perf record -d kill (test-record-data)
perf record -F 100 kill (test-record-freq)
perf record -g kill (test-record-graph-default)
+ perf record -g kill (test-record-graph-default-aarch64)
perf record --call-graph dwarf kill (test-record-graph-dwarf)
perf record --call-graph fp kill (test-record-graph-fp)
+ perf record --call-graph fp kill (test-record-graph-fp-aarch64)
perf record --group -e cycles,instructions kill (test-record-group)
perf record -e '{cycles,instructions}' kill (test-record-group1)
perf record -e '{cycles/period=1/,instructions/period=2/}:S' kill (test-record-group2)
command = record
args = --no-bpf-event -g kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in the default mode (fp)
+arch = !aarch64
[event:base-record]
sample_type=295
--- /dev/null
+[config]
+command = record
+args = --no-bpf-event -g kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
command = record
args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
ret = 1
+# arm64 enables registers in fp mode
+arch = !aarch64
[event:base-record]
sample_type=295
--- /dev/null
+[config]
+command = record
+args = --no-bpf-event --call-graph fp kill >/dev/null 2>&1
+ret = 1
+arch = aarch64
+
+[event:base-record]
+sample_type=4391
+sample_regs_user=1073741824
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
printf "static const char *prctl_options[] = {\n"
-regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*'
+regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$'
egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \
sed -r "s/$regex/\2 \1/g" | \
sort -n | xargs printf "\t[%s] = \"%s\",\n"
memset(&objdump_process, 0, sizeof(objdump_process));
objdump_process.argv = objdump_argv;
objdump_process.out = -1;
+ objdump_process.err = -1;
if (start_command(&objdump_process)) {
pr_err("Failure starting to run %s\n", command);
err = -1;
pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
out:
- free(map_name);
if (!err)
*key_scan_pos += strlen(map_opt);
+
+ free(map_name);
return err;
}
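Editor's note: the reorder matters because map_opt points into the allocation behind map_name, so the strlen() on the success path was reading freed memory. Reduced to its essentials (strings illustrative):

	char *name = strdup("mymap:opts");	/* owning allocation */
	char *opt  = strchr(name, ':') + 1;	/* alias into 'name' */
	size_t len;

	len = strlen(opt);	/* last use of the alias ... */
	free(name);		/* ... so the free must come after it */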
idx = evsel->core.idx;
err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
if (err) {
- pr_err("bpf map lookup falied: idx=%u, event=%s, cgrp=%s\n",
+ pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
idx, evsel__name(evsel), evsel->cgrp->name);
goto out;
}
u8 timeless_decoding;
u8 snapshot_mode;
u8 data_queued;
- u8 sample_branches;
- u8 sample_instructions;
int num_cpu;
u64 latest_kernel_timestamp;
{
struct cs_etm_packet *tmp;
- if (etm->sample_branches || etm->synth_opts.last_branch ||
- etm->sample_instructions) {
+ if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
+ etm->synth_opts.instructions) {
/*
* Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
* the next incoming packet.
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_branches = true;
etm->branches_sample_type = attr.sample_type;
etm->branches_id = id;
id += 1;
err = cs_etm__synth_event(session, &attr, id);
if (err)
return err;
- etm->sample_instructions = true;
etm->instructions_sample_type = attr.sample_type;
etm->instructions_id = id;
id += 1;
tidq->prev_packet->last_instr_taken_branch)
cs_etm__update_last_branch_rb(etmq, tidq);
- if (etm->sample_instructions &&
+ if (etm->synth_opts.instructions &&
tidq->period_instructions >= etm->instructions_sample_period) {
/*
* Emit instruction sample periodically
}
}
- if (etm->sample_branches) {
+ if (etm->synth_opts.branches) {
bool generate_sample = false;
/* Generate sample for tracing on packet */
goto swap_packet;
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
}
- if (etm->sample_branches &&
+ if (etm->synth_opts.branches &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
err = cs_etm__synth_branch_sample(etmq, tidq);
if (err)
* the trace.
*/
if (etmq->etm->synth_opts.last_branch &&
+ etmq->etm->synth_opts.instructions &&
tidq->prev_packet->sample_type == CS_ETM_RANGE) {
u64 addr;
ams->addr = ip;
ams->al_addr = al.addr;
+ ams->al_level = al.level;
ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
ams->addr = addr;
ams->al_addr = al.addr;
+ ams->al_level = al.level;
ams->ms.maps = al.maps;
ams->ms.sym = al.sym;
ams->ms.map = al.map;
struct map_symbol ms;
u64 addr;
u64 al_addr;
+ char al_level;
u64 phys_addr;
u64 data_page_size;
};
bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
- bit_name(HW_INDEX),
+ bit_name(TYPE_SAVE), bit_name(HW_INDEX),
{ .name = NULL, }
};
#undef bit_name
++evlist->stats.nr_unknown_id;
return 0;
}
- dump_sample(evsel, event, sample, perf_env__arch(machine->env));
if (machine == NULL) {
++evlist->stats.nr_unprocessable_samples;
+ dump_sample(evsel, event, sample, perf_env__arch(NULL));
return 0;
}
+ dump_sample(evsel, event, sample, perf_env__arch(machine->env));
return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
struct addr_map_symbol *from = &he->branch_info->from;
return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
- he->level, bf, size, width);
+ from->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
struct addr_map_symbol *to = &he->branch_info->to;
return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
- he->level, bf, size, width);
+ to->al_level, bf, size, width);
}
return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
alias = list_prepare_entry(counter, &(evlist->core.entries), core.node);
list_for_each_entry_continue (alias, &evlist->core.entries, core.node) {
- if (strcmp(evsel__name(alias), evsel__name(counter)) ||
- alias->scale != counter->scale ||
- alias->cgrp != counter->cgrp ||
- strcmp(alias->unit, counter->unit) ||
- evsel__is_clock(alias) != evsel__is_clock(counter) ||
- !strcmp(alias->pmu_name, counter->pmu_name))
- break;
- alias->merged_stat = true;
- cb(config, alias, data, false);
+ /* Merge events with the same name, etc. but on different PMUs. */
+ if (!strcmp(evsel__name(alias), evsel__name(counter)) &&
+ alias->scale == counter->scale &&
+ alias->cgrp == counter->cgrp &&
+ !strcmp(alias->unit, counter->unit) &&
+ evsel__is_clock(alias) == evsel__is_clock(counter) &&
+ strcmp(alias->pmu_name, counter->pmu_name)) {
+ alias->merged_stat = true;
+ cb(config, alias, data, false);
+ }
}
}
perf_event__handler_t process, bool needs_mmap,
bool data_mmap, unsigned int nr_threads_synthesize)
{
+ /*
+ * When perf runs in non-root PID namespace, and the namespace's proc FS
+ * is not mounted, nsinfo__is_in_root_namespace() returns false.
+ * In this case, the proc FS is coming for the parent namespace, thus
+ * perf tool will wrongly gather process info from its parent PID
+ * namespace.
+ *
+ * To avoid the confusion that the perf tool runs in a child PID
+ * namespace but it synthesizes thread info from its parent PID
+ * namespace, returns failure with warning.
+ */
+ if (!nsinfo__is_in_root_namespace()) {
+ pr_err("Perf runs in non-root PID namespace but it tries to ");
+ pr_err("gather process info from its parent PID namespace.\n");
+ pr_err("Please mount the proc file system properly, e.g. ");
+		pr_err("add the '--mount-proc' option to the unshare command.\n");
+ return -EPERM;
+ }
+
if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine,
needs_mmap, data_mmap);
elif isinstance(ex, subprocess.CalledProcessError):
print(f'{name}: FAILED')
else:
- print('{name}: unexpected exception: {ex}')
+ print(f'{name}: unexpected exception: {ex}')
continue
output = ex.output
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "timer_crash.skel.h"
+
+enum {
+ MODE_ARRAY,
+ MODE_HASH,
+};
+
+static void test_timer_crash_mode(int mode)
+{
+ struct timer_crash *skel;
+
+ skel = timer_crash__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "timer_crash__open_and_load"))
+ return;
+ skel->bss->pid = getpid();
+ skel->bss->crash_map = mode;
+ if (!ASSERT_OK(timer_crash__attach(skel), "timer_crash__attach"))
+ goto end;
+ usleep(1);
+end:
+ timer_crash__destroy(skel);
+}
+
+void test_timer_crash(void)
+{
+ if (test__start_subtest("array"))
+ test_timer_crash_mode(MODE_ARRAY);
+ if (test__start_subtest("hash"))
+ test_timer_crash_mode(MODE_HASH);
+}
int bpf_prog4(struct sk_msg_md *msg)
{
int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
- int *start, *end, *start_push, *end_push, *start_pop, *pop;
+ int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
{
int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
+ int err = 0;
__u64 flags = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_DROP;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
int bpf_prog10(struct sk_msg_md *msg)
{
int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
- int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
+ int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;
bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
if (bytes)
bpf_msg_pull_data(msg, *start, *end, 0);
start_push = bpf_map_lookup_elem(&sock_bytes, &two);
end_push = bpf_map_lookup_elem(&sock_bytes, &three);
- if (start_push && end_push)
- bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (start_push && end_push) {
+ err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
+ if (err)
+ return SK_PASS;
+ }
start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
pop = bpf_map_lookup_elem(&sock_bytes, &five);
if (start_pop && pop)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_elem {
+ struct bpf_timer timer;
+ struct bpf_spin_lock lock;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} amap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, struct map_elem);
+} hmap SEC(".maps");
+
+int pid = 0;
+int crash_map = 0; /* 0 for amap, 1 for hmap */
+
+SEC("fentry/do_nanosleep")
+int sys_enter(void *ctx)
+{
+ struct map_elem *e, value = {};
+ void *map = crash_map ? (void *)&hmap : (void *)&amap;
+
+ if (bpf_get_current_task_btf()->tgid != pid)
+ return 0;
+
+ *(void **)&value = (void *)0xdeadcaf3;
+
+ bpf_map_update_elem(map, &(int){0}, &value, 0);
+	/* For the array map, bpf_map_update_elem() calls
+	 * check_and_free_timer_in_array(), which triggers the crash if the
+	 * timer pointer was overwritten; for the hash map we need to use
+	 * bpf_timer_cancel() instead.
+	 */
+ if (crash_map == 1) {
+ e = bpf_map_lookup_elem(map, &(int){0});
+ if (!e)
+ return 0;
+ bpf_timer_cancel(&e->timer);
+ }
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
int main(int argc, char *argv[])
{
- pid_t pid;
-
uid_t uid = getuid();
ksft_print_header();
#define SYSFS_PATH_MAX 256
#define DNAME_PATH_MAX 256
+/*
+ * Support ancient lirc.h which does not have these values. Can be removed
+ * once RHEL 8 is no longer a relevant testing platform.
+ */
+#if RC_PROTO_MAX < 26
+#define RC_PROTO_RCMM12 24
+#define RC_PROTO_RCMM24 25
+#define RC_PROTO_RCMM32 26
+#endif
+
static const struct {
enum rc_proto proto;
const char *name;
TEST_GEN_PROGS_x86_64 += x86_64/vmx_pmu_msrs_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_shinfo_test
TEST_GEN_PROGS_x86_64 += x86_64/xen_vmcall_test
-TEST_GEN_PROGS_x86_64 += x86_64/vmx_pi_mmio_test
TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
TEST_GEN_PROGS_x86_64 += x86_64/amx_test
TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
--- /dev/null
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ksft_skip=4
+
+NS=ns
+IP6=2001:db8:1::1/64
+TGT6=2001:db8:1::2
+TMPF=`mktemp`
+
+cleanup()
+{
+ rm -f $TMPF
+ ip netns del $NS
+}
+
+trap cleanup EXIT
+
+NSEXE="ip netns exec $NS"
+
+tcpdump -h | grep immediate-mode >> /dev/null
+if [ $? -ne 0 ]; then
+ echo "SKIP - tcpdump with --immediate-mode option required"
+ exit $ksft_skip
+fi
+
+# Namespaces
+ip netns add $NS
+
+$NSEXE sysctl -w net.ipv4.ping_group_range='0 2147483647' > /dev/null
+
+# Connectivity
+ip -netns $NS link add type dummy
+ip -netns $NS link set dev dummy0 up
+ip -netns $NS addr add $IP6 dev dummy0
+
+# Test
+BAD=0
+TOTAL=0
+
+check_result() {
+ ((TOTAL++))
+ if [ $1 -ne $2 ]; then
+ echo " Case $3 returned $1, expected $2"
+ ((BAD++))
+ fi
+}
+
+# IPV6_DONTFRAG
+for ovr in setsock cmsg both diff; do
+ for df in 0 1; do
+ for p in u i r; do
+ [ $p == "u" ] && prot=UDP
+ [ $p == "i" ] && prot=ICMP
+ [ $p == "r" ] && prot=RAW
+
+ [ $ovr == "setsock" ] && m="-F $df"
+ [ $ovr == "cmsg" ] && m="-f $df"
+ [ $ovr == "both" ] && m="-F $df -f $df"
+ [ $ovr == "diff" ] && m="-F $((1 - df)) -f $df"
+
+ $NSEXE ./cmsg_sender -s -S 2000 -6 -p $p $m $TGT6 1234
+ check_result $? $df "DONTFRAG $prot $ovr"
+ done
+ done
+done
+
+# IPV6_TCLASS
+TOS=0x10
+TOS2=0x20
+
+ip -6 -netns $NS rule add tos $TOS lookup 300
+ip -6 -netns $NS route add table 300 prohibit any
+
+for ovr in setsock cmsg both diff; do
+ for p in u i r; do
+ [ $p == "u" ] && prot=UDP
+ [ $p == "i" ] && prot=ICMP
+ [ $p == "r" ] && prot=RAW
+
+ [ $ovr == "setsock" ] && m="-C"
+ [ $ovr == "cmsg" ] && m="-c"
+ [ $ovr == "both" ] && m="-C $((TOS2)) -c"
+ [ $ovr == "diff" ] && m="-C $((TOS )) -c"
+
+ $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null &
+ BG=$!
+ sleep 0.05
+
+ $NSEXE ./cmsg_sender -6 -p $p $m $((TOS2)) $TGT6 1234
+ check_result $? 0 "TCLASS $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+ $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "class $TOS2" >> /dev/null
+ check_result $? 0 "TCLASS $prot $ovr - packet data"
+ rm $TMPF
+
+ [ $ovr == "both" ] && m="-C $((TOS )) -c"
+ [ $ovr == "diff" ] && m="-C $((TOS2)) -c"
+
+ $NSEXE ./cmsg_sender -6 -p $p $m $((TOS)) -s $TGT6 1234
+ check_result $? 1 "TCLASS $prot $ovr - rejection"
+ done
+done
+
+# IPV6_HOPLIMIT
+LIM=4
+
+for ovr in setsock cmsg both diff; do
+ for p in u i r; do
+ [ $p == "u" ] && prot=UDP
+ [ $p == "i" ] && prot=ICMP
+ [ $p == "r" ] && prot=RAW
+
+ [ $ovr == "setsock" ] && m="-L"
+ [ $ovr == "cmsg" ] && m="-l"
+ [ $ovr == "both" ] && m="-L $LIM -l"
+ [ $ovr == "diff" ] && m="-L $((LIM + 1)) -l"
+
+ $NSEXE nohup tcpdump --immediate-mode -p -ni dummy0 -w $TMPF -c 4 2> /dev/null &
+ BG=$!
+ sleep 0.05
+
+ $NSEXE ./cmsg_sender -6 -p $p $m $LIM $TGT6 1234
+ check_result $? 0 "HOPLIMIT $prot $ovr - pass"
+
+ while [ -d /proc/$BG ]; do
+ $NSEXE ./cmsg_sender -6 -p u $TGT6 1234
+ done
+
+ tcpdump -r $TMPF -v 2>&1 | grep "hlim $LIM[^0-9]" >> /dev/null
+ check_result $? 0 "HOPLIMIT $prot $ovr - packet data"
+ rm $TMPF
+ done
+done
+
+# IPV6 exthdr
+for p in u i r; do
+ # Very basic "does it crash" test
+ for h in h d r; do
+ $NSEXE ./cmsg_sender -p $p -6 -H $h $TGT6 1234
+		check_result $? 0 "ExtHdr $p $h - pass"
+ done
+done
+
+# Summary
+if [ $BAD -ne 0 ]; then
+ echo "FAIL - $BAD/$TOTAL cases failed"
+ exit 1
+else
+ echo "OK"
+ exit 0
+fi
ERN_CMSG_RCV,
};
+struct option_cmsg_u32 {
+ bool ena;
+ unsigned int val;
+};
+
struct options {
bool silent_send;
const char *host;
const char *service;
+ unsigned int size;
struct {
unsigned int mark;
+ unsigned int dontfrag;
+ unsigned int tclass;
+ unsigned int hlimit;
} sockopt;
struct {
unsigned int family;
unsigned int type;
unsigned int proto;
} sock;
- struct {
- bool ena;
- unsigned int val;
- } mark;
+ struct option_cmsg_u32 mark;
struct {
bool ena;
unsigned int delay;
struct {
bool ena;
} ts;
+ struct {
+ struct option_cmsg_u32 dontfrag;
+ struct option_cmsg_u32 tclass;
+ struct option_cmsg_u32 hlimit;
+ struct option_cmsg_u32 exthdr;
+ } v6;
} opt = {
+ .size = 13,
.sock = {
.family = AF_UNSPEC,
.type = SOCK_DGRAM,
printf("Usage: %s [opts] <dst host> <dst port / service>\n", bin);
printf("Options:\n"
"\t\t-s Silent send() failures\n"
+ "\t\t-S send() size\n"
"\t\t-4/-6 Force IPv4 / IPv6 only\n"
"\t\t-p prot Socket protocol\n"
"\t\t (u = UDP (default); i = ICMP; r = RAW)\n"
"\t\t-M val Set SO_MARK via setsockopt\n"
"\t\t-d val Set SO_TXTIME with given delay (usec)\n"
"\t\t-t Enable time stamp reporting\n"
+ "\t\t-f val Set don't fragment via cmsg\n"
+ "\t\t-F val Set don't fragment via setsockopt\n"
+ "\t\t-c val Set TCLASS via cmsg\n"
+ "\t\t-C val Set TCLASS via setsockopt\n"
+ "\t\t-l val Set HOPLIMIT via cmsg\n"
+ "\t\t-L val Set HOPLIMIT via setsockopt\n"
+ "\t\t-H type Add an IPv6 header option\n"
+ "\t\t (h = HOP; d = DST; r = RTDST)"
"");
exit(ERN_HELP);
}
{
char o;
- while ((o = getopt(argc, argv, "46sp:m:M:d:t")) != -1) {
+ while ((o = getopt(argc, argv, "46sS:p:m:M:d:tf:F:c:C:l:L:H:")) != -1) {
switch (o) {
case 's':
opt.silent_send = true;
break;
+ case 'S':
+ opt.size = atoi(optarg);
+ break;
case '4':
opt.sock.family = AF_INET;
break;
case 't':
opt.ts.ena = true;
break;
+ case 'f':
+ opt.v6.dontfrag.ena = true;
+ opt.v6.dontfrag.val = atoi(optarg);
+ break;
+ case 'F':
+ opt.sockopt.dontfrag = atoi(optarg);
+ break;
+ case 'c':
+ opt.v6.tclass.ena = true;
+ opt.v6.tclass.val = atoi(optarg);
+ break;
+ case 'C':
+ opt.sockopt.tclass = atoi(optarg);
+ break;
+ case 'l':
+ opt.v6.hlimit.ena = true;
+ opt.v6.hlimit.val = atoi(optarg);
+ break;
+ case 'L':
+ opt.sockopt.hlimit = atoi(optarg);
+ break;
+ case 'H':
+ opt.v6.exthdr.ena = true;
+ switch (optarg[0]) {
+ case 'h':
+ opt.v6.exthdr.val = IPV6_HOPOPTS;
+ break;
+ case 'd':
+ opt.v6.exthdr.val = IPV6_DSTOPTS;
+ break;
+ case 'r':
+ opt.v6.exthdr.val = IPV6_RTHDRDSTOPTS;
+ break;
+ default:
+ printf("Error: hdr type: %s\n", optarg);
+ break;
+ }
+ break;
}
}
opt.service = argv[optind + 1];
}
+static void memrnd(void *s, size_t n)
+{
+ int *dword = s;
+ char *byte;
+
+ for (; n >= 4; n -= 4)
+ *dword++ = rand();
+ byte = (void *)dword;
+ while (n--)
+ *byte++ = rand();
+}
+
+static void
+ca_write_cmsg_u32(char *cbuf, size_t cbuf_sz, size_t *cmsg_len,
+ int level, int optname, struct option_cmsg_u32 *uopt)
+{
+ struct cmsghdr *cmsg;
+
+ if (!uopt->ena)
+ return;
+
+ cmsg = (struct cmsghdr *)(cbuf + *cmsg_len);
+ *cmsg_len += CMSG_SPACE(sizeof(__u32));
+ if (cbuf_sz < *cmsg_len)
+ error(ERN_CMSG_WR, EFAULT, "cmsg buffer too small");
+
+ cmsg->cmsg_level = level;
+ cmsg->cmsg_type = optname;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
+ *(__u32 *)CMSG_DATA(cmsg) = uopt->val;
+}
+
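Editor's note: the helper relies on the standard cmsg(3) macros — CMSG_LEN() is what goes into cmsg_len (header plus payload), while CMSG_SPACE() adds alignment padding and is the amount the write cursor advances. Building a single u32 ancillary item from scratch looks roughly like:

	char cbuf[CMSG_SPACE(sizeof(__u32))];
	struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
	struct msghdr msg = {};

	cmsg->cmsg_level = SOL_IPV6;
	cmsg->cmsg_type  = IPV6_TCLASS;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));	/* header + 4 bytes */
	*(__u32 *)CMSG_DATA(cmsg) = 0x10;

	msg.msg_control    = cbuf;
	msg.msg_controllen = CMSG_SPACE(sizeof(__u32));	/* padded size */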
static void
cs_write_cmsg(int fd, struct msghdr *msg, char *cbuf, size_t cbuf_sz)
{
msg->msg_control = cbuf;
cmsg_len = 0;
- if (opt.mark.ena) {
- cmsg = (struct cmsghdr *)(cbuf + cmsg_len);
- cmsg_len += CMSG_SPACE(sizeof(__u32));
- if (cbuf_sz < cmsg_len)
- error(ERN_CMSG_WR, EFAULT, "cmsg buffer too small");
+ ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len,
+ SOL_SOCKET, SO_MARK, &opt.mark);
+ ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len,
+ SOL_IPV6, IPV6_DONTFRAG, &opt.v6.dontfrag);
+ ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len,
+ SOL_IPV6, IPV6_TCLASS, &opt.v6.tclass);
+ ca_write_cmsg_u32(cbuf, cbuf_sz, &cmsg_len,
+ SOL_IPV6, IPV6_HOPLIMIT, &opt.v6.hlimit);
- cmsg->cmsg_level = SOL_SOCKET;
- cmsg->cmsg_type = SO_MARK;
- cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
- *(__u32 *)CMSG_DATA(cmsg) = opt.mark.val;
- }
if (opt.txtime.ena) {
struct sock_txtime so_txtime = {
.clockid = CLOCK_MONOTONIC,
*(__u32 *)CMSG_DATA(cmsg) = SOF_TIMESTAMPING_TX_SCHED |
SOF_TIMESTAMPING_TX_SOFTWARE;
}
+ if (opt.v6.exthdr.ena) {
+ cmsg = (struct cmsghdr *)(cbuf + cmsg_len);
+ cmsg_len += CMSG_SPACE(8);
+ if (cbuf_sz < cmsg_len)
+ error(ERN_CMSG_WR, EFAULT, "cmsg buffer too small");
+
+ cmsg->cmsg_level = SOL_IPV6;
+ cmsg->cmsg_type = opt.v6.exthdr.val;
+ cmsg->cmsg_len = CMSG_LEN(8);
+ *(__u64 *)CMSG_DATA(cmsg) = 0;
+ }
if (cmsg_len)
msg->msg_controllen = cmsg_len;
}
}
+static void ca_set_sockopts(int fd)
+{
+ if (opt.sockopt.mark &&
+ setsockopt(fd, SOL_SOCKET, SO_MARK,
+ &opt.sockopt.mark, sizeof(opt.sockopt.mark)))
+ error(ERN_SOCKOPT, errno, "setsockopt SO_MARK");
+ if (opt.sockopt.dontfrag &&
+ setsockopt(fd, SOL_IPV6, IPV6_DONTFRAG,
+ &opt.sockopt.dontfrag, sizeof(opt.sockopt.dontfrag)))
+ error(ERN_SOCKOPT, errno, "setsockopt IPV6_DONTFRAG");
+ if (opt.sockopt.tclass &&
+ setsockopt(fd, SOL_IPV6, IPV6_TCLASS,
+ &opt.sockopt.tclass, sizeof(opt.sockopt.tclass)))
+ error(ERN_SOCKOPT, errno, "setsockopt IPV6_TCLASS");
+ if (opt.sockopt.hlimit &&
+ setsockopt(fd, SOL_IPV6, IPV6_UNICAST_HOPS,
+ &opt.sockopt.hlimit, sizeof(opt.sockopt.hlimit)))
+ error(ERN_SOCKOPT, errno, "setsockopt IPV6_HOPLIMIT");
+}
+
int main(int argc, char *argv[])
{
- char buf[] = "blablablabla";
struct addrinfo hints, *ai;
struct iovec iov[1];
struct msghdr msg;
char cbuf[1024];
+ char *buf;
int err;
int fd;
cs_parse_args(argc, argv);
+ buf = malloc(opt.size);
+ memrnd(buf, opt.size);
+
memset(&hints, 0, sizeof(hints));
hints.ai_family = opt.sock.family;
buf[0] = ICMPV6_ECHO_REQUEST;
buf[1] = 0;
} else if (opt.sock.type == SOCK_RAW) {
- struct udphdr hdr = { 1, 2, htons(sizeof(buf)), 0 };
+ struct udphdr hdr = { 1, 2, htons(opt.size), 0 };
		struct sockaddr_in6 *sin6 = (void *)ai->ai_addr;
memcpy(buf, &hdr, sizeof(hdr));
sin6->sin6_port = htons(opt.sock.proto);
}
- if (opt.sockopt.mark &&
- setsockopt(fd, SOL_SOCKET, SO_MARK,
- &opt.sockopt.mark, sizeof(opt.sockopt.mark)))
- error(ERN_SOCKOPT, errno, "setsockopt SO_MARK");
+ ca_set_sockopts(fd);
if (clock_gettime(CLOCK_REALTIME, &time_start_real))
error(ERN_GETTIME, errno, "gettime REALTIME");
if (clock_gettime(CLOCK_MONOTONIC, &time_start_mono))
- error(ERN_GETTIME, errno, "gettime MONOTINIC");
+ error(ERN_GETTIME, errno, "gettime MONOTONIC");
iov[0].iov_base = buf;
- iov[0].iov_len = sizeof(buf);
+ iov[0].iov_len = opt.size;
memset(&msg, 0, sizeof(msg));
msg.msg_name = ai->ai_addr;
fprintf(stderr, "send failed: %s\n", strerror(errno));
err = ERN_SEND;
goto err_out;
- } else if (err != sizeof(buf)) {
+ } else if (err != (int)opt.size) {
fprintf(stderr, "short send\n");
err = ERN_SEND_SHORT;
goto err_out;
ipv6_rt_replace_mpath
}
+ipv6_rt_dsfield()
+{
+ echo
+ echo "IPv6 route with dsfield tests"
+
+ run_cmd "$IP -6 route flush 2001:db8:102::/64"
+
+ # IPv6 doesn't support routing based on dsfield
+ run_cmd "$IP -6 route add 2001:db8:102::/64 dsfield 0x04 via 2001:db8:101::2"
+ log_test $? 2 "Reject route with dsfield"
+}
+
ipv6_route_test()
{
route_setup
ipv6_rt_add
ipv6_rt_replace
+ ipv6_rt_dsfield
route_cleanup
}
police_tx_mirror_test
police_pps_rx_test
police_pps_tx_test
+ police_mtu_rx_test
+ police_mtu_tx_test
"
NUM_NETIFS=6
source tc_common.sh
tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower
}
+police_mtu_common_test() {
+ RET=0
+
+ local test_name=$1; shift
+ local dev=$1; shift
+ local direction=$1; shift
+
+ tc filter add dev $dev $direction protocol ip pref 1 handle 101 flower \
+ dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+ action police mtu 1042 conform-exceed drop/ok
+
+ # to count "conform" packets
+ tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+ dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+ action drop
+
+ mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+ -t udp sp=12345,dp=54321 -p 1001 -c 10 -q
+
+ mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+ -t udp sp=12345,dp=54321 -p 1000 -c 3 -q
+
+ tc_check_packets "dev $dev $direction" 101 13
+ check_err $? "wrong packet counter"
+
+ # "exceed" packets
+ local overlimits_t0=$(tc_rule_stats_get ${dev} 1 ${direction} .overlimits)
+ test ${overlimits_t0} = 10
+ check_err $? "wrong overlimits, expected 10 got ${overlimits_t0}"
+
+ # "conform" packets
+ tc_check_packets "dev $h2 ingress" 101 3
+ check_err $? "forwarding error"
+
+ tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+ tc filter del dev $dev $direction protocol ip pref 1 handle 101 flower
+
+ log_test "$test_name"
+}
+
+police_mtu_rx_test()
+{
+ police_mtu_common_test "police mtu (rx)" $rp1 ingress
+}
+
+police_mtu_tx_test()
+{
+ police_mtu_common_test "police mtu (tx)" $rp2 egress
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}
pm_nl_add_endpoint $ns2 10.0.3.2 flags signal
pm_nl_add_endpoint $ns2 10.0.4.2 flags signal
run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr "signal addresses race test" 3 3 3
# the server will not signal the address terminating
# the MPC subflow
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh
+ conntrack_vrf.sh nft_synproxy.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
done
done
-[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP}
+[ ${passed} -eq 0 ] && exit ${KSELFTEST_SKIP} || exit 0
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.rp_filter=0 > /dev/null
sleep 3
--- /dev/null
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+rnd=$(mktemp -u XXXXXXXX)
+nsr="nsr-$rnd" # synproxy machine
+ns1="ns1-$rnd" # iperf client
+ns2="ns2-$rnd" # iperf server
+
+checktool() {
+ if ! $1 > /dev/null 2>&1; then
+ echo "SKIP: Could not $2"
+ exit $ksft_skip
+ fi
+}
+
+checktool "nft --version" "run test without nft tool"
+checktool "ip -Version" "run test without ip tool"
+checktool "iperf3 --version" "run test without iperf3"
+checktool "ip netns add $nsr" "create net namespace"
+
+modprobe -q nf_conntrack
+
+ip netns add $ns1
+ip netns add $ns2
+
+cleanup() {
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+ ip netns pids $ns2 | xargs kill 2>/dev/null
+ ip netns del $ns1
+ ip netns del $ns2
+
+ ip netns del $nsr
+}
+
+trap cleanup EXIT
+
+ip link add veth0 netns $nsr type veth peer name eth0 netns $ns1
+ip link add veth1 netns $nsr type veth peer name eth0 netns $ns2
+
+for dev in lo veth0 veth1; do
+  ip -net $nsr link set $dev up
+done
+
+ip -net $nsr addr add 10.0.1.1/24 dev veth0
+ip -net $nsr addr add 10.0.2.1/24 dev veth1
+
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth0.forwarding=1
+ip netns exec $nsr sysctl -q net.ipv4.conf.veth1.forwarding=1
+ip netns exec $nsr sysctl -q net.netfilter.nf_conntrack_tcp_loose=0
+
+for n in $ns1 $ns2; do
+ ip -net $n link set lo up
+ ip -net $n link set eth0 up
+done
+ip -net $ns1 addr add 10.0.1.99/24 dev eth0
+ip -net $ns2 addr add 10.0.2.99/24 dev eth0
+ip -net $ns1 route add default via 10.0.1.1
+ip -net $ns2 route add default via 10.0.2.1
+
+# test basic connectivity
+if ! ip netns exec $ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
+ echo "ERROR: $ns1 cannot reach $ns2" 1>&2
+ exit 1
+fi
+
+if ! ip netns exec $ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
+ echo "ERROR: $ns2 cannot reach $ns1" 1>&2
+ exit 1
+fi
+
+ip netns exec $ns2 iperf3 -s > /dev/null 2>&1 &
+# ip netns exec $nsr tcpdump -vvv -n -i veth1 tcp | head -n 10 &
+
+sleep 1
+
+ip netns exec $nsr nft -f - <<EOF
+table inet filter {
+ chain prerouting {
+ type filter hook prerouting priority -300; policy accept;
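+		# exempt client SYNs from conntrack so they reach the forward
+		# chain in state "untracked" for the synproxy rule to handle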
+ meta iif veth0 tcp flags syn counter notrack
+ }
+
+ chain forward {
+ type filter hook forward priority 0; policy accept;
+
+ ct state new,established counter accept
+
+ meta iif veth0 meta l4proto tcp ct state untracked,invalid synproxy mss 1460 sack-perm timestamp
+
+ ct state invalid counter drop
+
+ # make ns2 unreachable w.o. tcp synproxy
+ tcp flags syn counter drop
+ }
+}
+EOF
+if [ $? -ne 0 ]; then
+ echo "SKIP: Cannot add nft synproxy"
+ exit $ksft_skip
+fi
+
+ip netns exec $ns1 timeout 5 iperf3 -c 10.0.2.99 -n $((1 * 1024 * 1024)) > /dev/null
+# capture iperf3's exit status before any other command overwrites $?
+ret=$?
+
+if [ $ret -ne 0 ]; then
+	echo "FAIL: iperf3 returned an error" 1>&2
+	ip netns exec $nsr nft list ruleset
+else
+	echo "PASS: synproxy connection successful"
+fi
+
+exit $ret
} ctx;
/* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. */
-#define TEST_SIG_DATA(addr) (~(unsigned long)(addr))
+#define TEST_SIG_DATA(addr, id) (~(unsigned long)(addr) + (id))
-static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr)
+static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr,
+ unsigned long id)
{
struct perf_event_attr attr = {
.type = PERF_TYPE_BREAKPOINT,
.inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */
.remove_on_exec = 1, /* Required by sigtrap. */
.sigtrap = 1, /* Request synchronous SIGTRAP on event. */
- .sig_data = TEST_SIG_DATA(addr),
+ .sig_data = TEST_SIG_DATA(addr, id),
};
return attr;
}
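For context, whatever is stored in attr.sig_data is delivered to the SIGTRAP handler as siginfo_t::si_perf_data (recent kernel and libc headers required). A minimal sketch of such a handler; printf() is illustrative only, as it is not async-signal-safe:

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>

/* Sketch of a handler for an event opened with attr.sigtrap = 1: the
 * kernel copies attr.sig_data into si_perf_data on delivery. */
static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	printf("si_perf_data = %#lx\n", (unsigned long)info->si_perf_data);
}

static void install_sigtrap_handler(void)
{
	struct sigaction action = {};

	action.sa_flags = SA_SIGINFO;
	action.sa_sigaction = sigtrap_handler;
	sigaction(SIGTRAP, &action, NULL);
}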
FIXTURE_SETUP(sigtrap_threads)
{
- struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on);
+ struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on, 0);
struct sigaction action = {};
int i;
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
/* Check enabled for parent. */
ctx.iterate_on = 0;
/* Test that modification propagates to all inherited events. */
TEST_F(sigtrap_threads, modify_and_enable_event)
{
- struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on);
+ struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on, 42);
EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr), 0);
run_test_threads(_metadata, self);
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 42));
/* Check enabled for parent. */
ctx.iterate_on = 0;
EXPECT_EQ(ctx.tids_want_signal, 0);
EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on);
EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT);
- EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on));
+ EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0));
}
TEST_HARNESS_MAIN
#define PIDFD_SKIP 3
#define PIDFD_XFAIL 4
-int wait_for_pid(pid_t pid)
+static inline int wait_for_pid(pid_t pid)
{
int status, ret;
if (errno == EINTR)
goto again;
+ ksft_print_msg("waitpid returned -1, errno=%d\n", errno);
return -1;
}
- if (!WIFEXITED(status))
+ if (!WIFEXITED(status)) {
+ ksft_print_msg(
+ "waitpid !WIFEXITED, WIFSIGNALED=%d, WTERMSIG=%d\n",
+ WIFSIGNALED(status), WTERMSIG(status));
return -1;
+ }
- return WEXITSTATUS(status);
+ ret = WEXITSTATUS(status);
+ ksft_print_msg("waitpid WEXITSTATUS=%d\n", ret);
+ return ret;
}
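The new messages mirror the standard wait status macros. A self-contained sketch of the same decoding, outside the kselftest harness:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	int status;

	if (pid == 0)
		_exit(7);	/* child: plain exit, status 7 */
	if (waitpid(pid, &status, 0) < 0)
		return 1;
	if (WIFEXITED(status))
		printf("exited, status=%d\n", WEXITSTATUS(status));
	else if (WIFSIGNALED(status))
		printf("killed by signal %d\n", WTERMSIG(status));
	return 0;
}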
static inline int sys_pidfd_open(pid_t pid, unsigned int flags)
#include <string.h>
#include <syscall.h>
#include <sys/wait.h>
+#include <sys/mman.h>
#include "pidfd.h"
#include "../kselftest.h"
return err->code;
}
+#define CHILD_STACK_SIZE 8192
+
struct child {
+ char *stack;
pid_t pid;
int fd;
};
struct error *err)
{
static int flags = CLONE_PIDFD | CLONE_NEWPID | CLONE_NEWNS | SIGCHLD;
- size_t stack_size = 1024;
- char *stack[1024] = { 0 };
struct child ret;
if (!(flags & CLONE_NEWUSER) && geteuid() != 0)
flags |= CLONE_NEWUSER;
+ ret.stack = mmap(NULL, CHILD_STACK_SIZE, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+ if (ret.stack == MAP_FAILED) {
+ error_set(err, -1, "mmap of stack failed (errno %d)", errno);
+ return ret;
+ }
+
#ifdef __ia64__
- ret.pid = __clone2(fn, stack, stack_size, flags, args, &ret.fd);
+ ret.pid = __clone2(fn, ret.stack, CHILD_STACK_SIZE, flags, args, &ret.fd);
#else
- ret.pid = clone(fn, stack + stack_size, flags, args, &ret.fd);
+ ret.pid = clone(fn, ret.stack + CHILD_STACK_SIZE, flags, args, &ret.fd);
#endif
if (ret.pid < 0) {
else if (r > 0)
error_set(err, r, "child %d reported: %d", child->pid, r);
+ if (munmap(child->stack, CHILD_STACK_SIZE)) {
+ error_set(err, -1, "munmap of child stack failed (errno %d)", errno);
+ r = -1;
+ }
+
return r;
}
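The mmap()ed region replaces an undersized on-stack buffer. A standalone sketch of the same pattern; since the stack grows down on these architectures, the end of the mapping is what gets passed to clone():

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>

#define STACK_SIZE 8192

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	char *stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
	pid_t pid;

	if (stack == MAP_FAILED)
		return 1;
	/* pass the high end of the mapping as the child's initial stack */
	pid = clone(child_fn, stack + STACK_SIZE, SIGCHLD, NULL);
	if (pid < 0)
		return 1;
	waitpid(pid, NULL, 0);
	munmap(stack, STACK_SIZE);
	return 0;
}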
{
int pid, pidfd = 0;
int status, ret;
- pthread_t t1;
time_t prog_start = time(NULL);
const char *test_name = "pidfd_poll check for premature notification on child thread exec";
*/
*child_exit_secs = time(NULL);
syscall(SYS_exit, 0);
+ /* Never reached, but appeases compiler thinking we should return. */
+ exit(0);
}
static void test_pidfd_poll_leader_exit(int use_waitpid)
{
int pid, pidfd = 0;
- int status, ret;
- time_t prog_start = time(NULL);
+ int status, ret = 0;
const char *test_name = "pidfd_poll check for premature notification on non-empty"
"group leader exit";
TEST(wait_simple)
{
- int pidfd = -1, status = 0;
+ int pidfd = -1;
pid_t parent_tid = -1;
struct clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
.flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
.exit_signal = SIGCHLD,
};
- int ret;
pid_t pid;
siginfo_t info = {
.si_signo = 0,
TEST(wait_states)
{
- int pidfd = -1, status = 0;
+ int pidfd = -1;
pid_t parent_tid = -1;
struct clone_args args = {
.parent_tid = ptr_to_u64(&parent_tid),
static int userfaultfd_stress(void)
{
void *area;
+ char *tmp_area;
unsigned long nr;
struct uffdio_register uffdio_register;
struct uffd_stats uffd_stats[nr_cpus];
count_verify[nr], nr);
/* prepare next bounce */
- swap(area_src, area_dst);
+ tmp_area = area_src;
+ area_src = area_dst;
+ area_dst = tmp_area;
- swap(area_src_alias, area_dst_alias);
+ tmp_area = area_src_alias;
+ area_src_alias = area_dst_alias;
+ area_dst_alias = tmp_area;
uffd_stats_report(uffd_stats, nr_cpus);
}
NAME := rtla
-VERSION := 0.5
+# Follow the kernel version
+VERSION := $(shell cat VERSION 2> /dev/null || make -sC ../../.. kernelversion)
# From libtracefs:
# Makefiles suck: This macro sets a default value of $(2) for the
tarball: clean
rm -rf $(NAME)-$(VERSION) && mkdir $(NAME)-$(VERSION)
+ echo $(VERSION) > $(NAME)-$(VERSION)/VERSION
cp -r $(DIRS) $(FILES) $(NAME)-$(VERSION)
mkdir $(NAME)-$(VERSION)/Documentation/
cp -rp $(SRCTREE)/../../../Documentation/tools/rtla/* $(NAME)-$(VERSION)/Documentation/
*/
void osnoise_destroy_tool(struct osnoise_tool *top)
{
+ if (!top)
+ return;
+
trace_instance_destroy(&top->trace);
if (top->context)
int osnoise_hist_main(int argc, char *argv[])
{
struct osnoise_hist_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
out_hist:
osnoise_free_histogram(tool->data);
out_destroy:
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
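With osnoise_destroy_tool() now tolerating NULL, every exit path can destroy record unconditionally instead of testing params->trace_output. A small sketch of the idiom, with hypothetical names:

#include <stdlib.h>

struct tool { int *data; };

/* NULL-tolerant destructor, in the spirit of free(NULL) being a no-op */
static void tool_destroy(struct tool *t)
{
	if (!t)
		return;
	free(t->data);
	free(t);
}

int main(void)
{
	struct tool *record = NULL;	/* only allocated when requested */
	struct tool *top = calloc(1, sizeof(*top));

	/* any error path can run the same unconditional cleanup */
	tool_destroy(record);		/* no-op: record was never created */
	tool_destroy(top);
	return 0;
}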
int osnoise_top_main(int argc, char **argv)
{
struct osnoise_top_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
out_top:
osnoise_free_top(tool->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
out_exit:
exit(return_value);
}
int timerlat_hist_main(int argc, char *argv[])
{
struct timerlat_hist_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *tool = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *tool;
int return_value = 1;
int retval;
out_hist:
timerlat_free_histogram(tool->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(tool);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
int timerlat_top_main(int argc, char *argv[])
{
struct timerlat_top_params *params;
+ struct osnoise_tool *record = NULL;
+ struct osnoise_tool *top = NULL;
struct trace_instance *trace;
- struct osnoise_tool *record;
- struct osnoise_tool *top;
int return_value = 1;
int retval;
out_top:
timerlat_free_top(top->data);
+ osnoise_destroy_tool(record);
osnoise_destroy_tool(top);
- if (params->trace_output)
- osnoise_destroy_tool(record);
free(params);
out_exit:
exit(return_value);
tracer = TRACEFS_TRACER_CUSTOM;
- debug_msg("enabling %s tracer\n", tracer_name);
+ debug_msg("Enabling %s tracer\n", tracer_name);
retval = tracefs_tracer_set(inst, tracer, tracer_name);
if (retval < 0) {
if (errno == ENODEV)
- err_msg("tracer %s not found!\n", tracer_name);
+ err_msg("Tracer %s not found!\n", tracer_name);
- err_msg("failed to enable the tracer %s\n", tracer_name);
+ err_msg("Failed to enable the %s tracer\n", tracer_name);
return -1;
}
retval = tracefs_tracer_set(inst, t);
if (retval < 0)
- err_msg("oops, error disabling tracer\n");
+ err_msg("Oops, error disabling tracer\n");
}
/*
time_t duration;
duration = difftime(now, start_time);
- tm_info = localtime(&duration);
+ tm_info = gmtime(&duration);
snprintf(output, output_size, "%3d %02d:%02d:%02d",
tm_info->tm_yday,
- tm_info->tm_hour - 1,
+ tm_info->tm_hour,
tm_info->tm_min,
tm_info->tm_sec);
}
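localtime() applies the local timezone offset, which is meaningless for an elapsed-seconds value and forced the tm_hour - 1 workaround; gmtime() interprets the duration with no offset. A small sketch:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t elapsed = 90061;	/* 1 day, 1 hour, 1 minute, 1 second */
	struct tm *tm_info = gmtime(&elapsed);

	/* prints "  1 01:01:01": no timezone shift, no off-by-one hour */
	printf("%3d %02d:%02d:%02d\n", tm_info->tm_yday,
	       tm_info->tm_hour, tm_info->tm_min, tm_info->tm_sec);
	return 0;
}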